From 5289e0f24e568fc2aad4a15334464ce760cd1655 Mon Sep 17 00:00:00 2001 From: Junyang Shao Date: Thu, 12 Jun 2025 03:54:34 +0000 Subject: [PATCH] [dev.simd] cmd/compile: updates simd ordering and docs This CL is generated by CL 681395. Change-Id: Ic930aeeb24fc7f95a4d74c77403532d0b0eb39ff Reviewed-on: https://go-review.googlesource.com/c/go/+/681215 Auto-Submit: Junyang Shao Reviewed-by: Junyang Shao LUCI-TryBot-Result: Go LUCI Reviewed-by: Cherry Mui --- src/cmd/compile/internal/amd64/simdssa.go | 3033 +- .../compile/internal/ssa/_gen/simdAMD64.rules | 2089 +- .../compile/internal/ssa/_gen/simdAMD64ops.go | 1186 +- src/cmd/compile/internal/ssa/opGen.go | 3597 +-- src/cmd/compile/internal/ssa/rewriteAMD64.go | 23824 +++++++--------- .../compile/internal/ssagen/simdintrinsics.go | 704 +- src/simd/stubs_amd64.go | 4210 +-- src/simd/types_amd64.go | 480 +- 8 files changed, 17115 insertions(+), 22008 deletions(-) diff --git a/src/cmd/compile/internal/amd64/simdssa.go b/src/cmd/compile/internal/amd64/simdssa.go index d8d1a4c1a4..253bec09ca 100644 --- a/src/cmd/compile/internal/amd64/simdssa.go +++ b/src/cmd/compile/internal/amd64/simdssa.go @@ -10,2311 +10,870 @@ import ( ) func ssaGenSIMDValue(s *ssagen.State, v *ssa.Value) bool { - p := s.Prog(v.Op.Asm()) - // First arg + var p *obj.Prog switch v.Op { - // Immediates - case ssa.OpAMD64VPCMPUBMasked512, - ssa.OpAMD64VPCMPUQ256, - ssa.OpAMD64VPCMPBMasked128, - ssa.OpAMD64VPCMPUWMasked128, - ssa.OpAMD64VPCMPBMasked512, - ssa.OpAMD64VPCMPUWMasked512, - ssa.OpAMD64VPCMPD128, - ssa.OpAMD64VPCMPDMasked256, - ssa.OpAMD64VCMPPD128, - ssa.OpAMD64VCMPPS256, - ssa.OpAMD64VPCMPDMasked128, - ssa.OpAMD64VPCMPUW256, - ssa.OpAMD64VCMPPSMasked256, - ssa.OpAMD64VPCMPDMasked512, - ssa.OpAMD64VPCMPQ256, - ssa.OpAMD64VPCMPQMasked512, - ssa.OpAMD64VPCMPUD128, - ssa.OpAMD64VPCMPUW128, - ssa.OpAMD64VPCMPBMasked256, - ssa.OpAMD64VPCMPB256, - ssa.OpAMD64VPCMPUWMasked256, - ssa.OpAMD64VPCMPUDMasked512, - ssa.OpAMD64VPCMPWMasked128, - ssa.OpAMD64VCMPPD256, - ssa.OpAMD64VCMPPDMasked256, - ssa.OpAMD64VPCMPQMasked256, - ssa.OpAMD64VPCMPUQ512, - ssa.OpAMD64VPCMPUW512, - ssa.OpAMD64VCMPPSMasked512, - ssa.OpAMD64VPCMPQMasked128, - ssa.OpAMD64VPCMPUBMasked256, - ssa.OpAMD64VPCMPUD512, - ssa.OpAMD64VPCMPUQ128, - ssa.OpAMD64VCMPPS128, - ssa.OpAMD64VPCMPUB128, - ssa.OpAMD64VPCMPUQMasked128, - ssa.OpAMD64VPCMPUB512, - ssa.OpAMD64VPCMPUB256, - ssa.OpAMD64VPCMPUQMasked512, - ssa.OpAMD64VPCMPUDMasked256, - ssa.OpAMD64VCMPPSMasked128, - ssa.OpAMD64VPCMPB128, - ssa.OpAMD64VPCMPUD256, - ssa.OpAMD64VPCMPQ512, - ssa.OpAMD64VPCMPWMasked512, - ssa.OpAMD64VPCMPUDMasked128, - ssa.OpAMD64VCMPPDMasked128, - ssa.OpAMD64VPCMPQ128, - ssa.OpAMD64VPCMPUQMasked256, - ssa.OpAMD64VPCMPD512, - ssa.OpAMD64VPCMPD256, - ssa.OpAMD64VPCMPB512, - ssa.OpAMD64VPCMPWMasked256, - ssa.OpAMD64VPCMPW256, - ssa.OpAMD64VCMPPDMasked512, - ssa.OpAMD64VCMPPD512, - ssa.OpAMD64VPCMPUBMasked128, - ssa.OpAMD64VPCMPW128, - ssa.OpAMD64VCMPPS512, - ssa.OpAMD64VPCMPW512: - imm := v.AuxInt - if imm < 0 || imm > 255 { - v.Fatalf("Invalid source selection immediate") - } - p.From.Offset = imm - p.From.Type = obj.TYPE_CONST - - // Registers - case ssa.OpAMD64VPMINSW256, - ssa.OpAMD64VPMULLD256, - ssa.OpAMD64VORPD512, - ssa.OpAMD64VPMINSDMasked128, - ssa.OpAMD64VDIVPD256, - ssa.OpAMD64VPCMPEQW256, - ssa.OpAMD64VHADDPS128, - ssa.OpAMD64VPXOR128, - ssa.OpAMD64VPADDQ128, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VPANDQMasked256, - ssa.OpAMD64VSQRTPDMasked128, - ssa.OpAMD64VPMAXUB128, - ssa.OpAMD64VPSUBW256, - ssa.OpAMD64VPMAXSQ512, - 
ssa.OpAMD64VANDNPS512, - ssa.OpAMD64VPMULHUWMasked512, + case ssa.OpAMD64VPABSW256, + ssa.OpAMD64VPABSW128, + ssa.OpAMD64VPABSD128, + ssa.OpAMD64VPABSD256, + ssa.OpAMD64VPABSB128, + ssa.OpAMD64VPABSB256, ssa.OpAMD64VPABSW512, + ssa.OpAMD64VPABSD512, + ssa.OpAMD64VPABSQ128, + ssa.OpAMD64VPABSQ256, + ssa.OpAMD64VPABSQ512, + ssa.OpAMD64VPABSB512, + ssa.OpAMD64VRCP14PS512, + ssa.OpAMD64VRCP14PS128, + ssa.OpAMD64VRCP14PS256, + ssa.OpAMD64VRCP14PD128, + ssa.OpAMD64VRCP14PD256, + ssa.OpAMD64VRCP14PD512, + ssa.OpAMD64VRSQRTPS128, + ssa.OpAMD64VRSQRTPS256, + ssa.OpAMD64VRSQRT14PS512, + ssa.OpAMD64VRSQRT14PD128, ssa.OpAMD64VRSQRT14PD256, - ssa.OpAMD64VPHADDW128, - ssa.OpAMD64VPCMPGTQMasked256, - ssa.OpAMD64VPMULLQMasked256, - ssa.OpAMD64VPADDW512, - ssa.OpAMD64VPMULLDMasked128, - ssa.OpAMD64VPCMPEQQ128, - ssa.OpAMD64VPAVGW128, - ssa.OpAMD64VPOR256, - ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VRSQRT14PD512, + ssa.OpAMD64VPOPCNTW256, ssa.OpAMD64VPOPCNTW512, - ssa.OpAMD64VORPD256, - ssa.OpAMD64VPANDN256, - ssa.OpAMD64VPANDD512, - ssa.OpAMD64VPORQ512, - ssa.OpAMD64VPMULLWMasked256, - ssa.OpAMD64VPSUBSW128, - ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VPCMPGTB256, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VPCMPEQD256, - ssa.OpAMD64VPSUBSW512, - ssa.OpAMD64VPABSD512, + ssa.OpAMD64VPOPCNTW128, + ssa.OpAMD64VPOPCNTD512, + ssa.OpAMD64VPOPCNTD128, + ssa.OpAMD64VPOPCNTD256, + ssa.OpAMD64VPOPCNTQ128, + ssa.OpAMD64VPOPCNTQ256, + ssa.OpAMD64VPOPCNTQ512, + ssa.OpAMD64VPOPCNTB128, + ssa.OpAMD64VPOPCNTB256, + ssa.OpAMD64VPOPCNTB512, + ssa.OpAMD64VSQRTPS128, + ssa.OpAMD64VSQRTPS256, + ssa.OpAMD64VSQRTPD128, + ssa.OpAMD64VSQRTPD256, + ssa.OpAMD64VSQRTPS512, + ssa.OpAMD64VSQRTPD512: + p = simdFp11(s, v) + + case ssa.OpAMD64VADDPS128, + ssa.OpAMD64VADDPS256, + ssa.OpAMD64VADDPD128, + ssa.OpAMD64VADDPD256, + ssa.OpAMD64VPADDW256, + ssa.OpAMD64VPADDW128, + ssa.OpAMD64VPADDD128, + ssa.OpAMD64VPADDD256, + ssa.OpAMD64VPADDQ128, + ssa.OpAMD64VPADDQ256, + ssa.OpAMD64VPADDB128, + ssa.OpAMD64VPADDB256, + ssa.OpAMD64VADDPS512, + ssa.OpAMD64VADDPD512, + ssa.OpAMD64VPADDW512, ssa.OpAMD64VPADDD512, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPMULUDQ512, - ssa.OpAMD64VPADDSW512, - ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VPADDQ512, + ssa.OpAMD64VPADDB512, + ssa.OpAMD64VANDPS128, + ssa.OpAMD64VANDPS256, ssa.OpAMD64VANDPD128, - ssa.OpAMD64VPCMPEQD128, - ssa.OpAMD64VPHSUBSW128, - ssa.OpAMD64VPADDSW256, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VPMULHUW128, - ssa.OpAMD64VPCMPGTW512, - ssa.OpAMD64VSQRTPDMasked256, - ssa.OpAMD64VPOPCNTB256, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPMINSD512, - ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VPOPCNTB128, - ssa.OpAMD64VPOPCNTWMasked128, - ssa.OpAMD64VMAXPS128, - ssa.OpAMD64VPMULLD128, - ssa.OpAMD64VPSUBB256, - ssa.OpAMD64VMINPD128, - ssa.OpAMD64VPANDNQMasked512, + ssa.OpAMD64VANDPD256, + ssa.OpAMD64VPAND256, + ssa.OpAMD64VPAND128, ssa.OpAMD64VANDPS512, - ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VANDPD512, + ssa.OpAMD64VPANDD512, + ssa.OpAMD64VPANDQ512, + ssa.OpAMD64VANDNPS128, + ssa.OpAMD64VANDNPS256, + ssa.OpAMD64VANDNPD128, ssa.OpAMD64VANDNPD256, - ssa.OpAMD64VPAND128, + ssa.OpAMD64VPANDN256, ssa.OpAMD64VPANDN128, - ssa.OpAMD64VXORPD256, - ssa.OpAMD64VMAXPDMasked512, - ssa.OpAMD64VPMAXUWMasked256, - ssa.OpAMD64VPMINSQMasked128, - ssa.OpAMD64VPMULDQ256, - ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPXORQMasked128, - ssa.OpAMD64VPCMPEQW128, - ssa.OpAMD64VPABSWMasked128, - 
ssa.OpAMD64VPMAXSWMasked256, - ssa.OpAMD64VPMULDQ128, - ssa.OpAMD64VPMULLWMasked512, + ssa.OpAMD64VANDNPS512, + ssa.OpAMD64VANDNPD512, + ssa.OpAMD64VPANDND512, + ssa.OpAMD64VPANDNQ512, + ssa.OpAMD64VPAVGW256, + ssa.OpAMD64VPAVGW128, + ssa.OpAMD64VPAVGB128, + ssa.OpAMD64VPAVGB256, + ssa.OpAMD64VPAVGW512, + ssa.OpAMD64VPAVGB512, + ssa.OpAMD64VDIVPS128, + ssa.OpAMD64VDIVPS256, + ssa.OpAMD64VDIVPD128, + ssa.OpAMD64VDIVPD256, ssa.OpAMD64VDIVPS512, - ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VRCP14PS512, - ssa.OpAMD64VRSQRT14PDMasked128, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VPADDW256, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VPCMPEQWMasked128, - ssa.OpAMD64VHSUBPS128, - ssa.OpAMD64VPMAXSDMasked512, - ssa.OpAMD64VPABSQMasked256, - ssa.OpAMD64VADDPS256, - ssa.OpAMD64VHSUBPS256, - ssa.OpAMD64VPSUBB128, + ssa.OpAMD64VDIVPD512, + ssa.OpAMD64VPCMPEQW256, + ssa.OpAMD64VPCMPEQW128, + ssa.OpAMD64VPCMPEQD128, + ssa.OpAMD64VPCMPEQD256, + ssa.OpAMD64VPCMPEQQ128, + ssa.OpAMD64VPCMPEQQ256, + ssa.OpAMD64VPCMPEQB128, + ssa.OpAMD64VPCMPEQB256, + ssa.OpAMD64VPCMPGTW256, + ssa.OpAMD64VPCMPGTW128, + ssa.OpAMD64VPCMPGTD128, + ssa.OpAMD64VPCMPGTD256, + ssa.OpAMD64VPCMPGTQ256, + ssa.OpAMD64VPCMPGTB128, + ssa.OpAMD64VPCMPGTB256, + ssa.OpAMD64VMAXPS128, + ssa.OpAMD64VMAXPS256, + ssa.OpAMD64VMAXPD128, + ssa.OpAMD64VMAXPD256, + ssa.OpAMD64VPMAXSW256, + ssa.OpAMD64VPMAXSW128, + ssa.OpAMD64VPMAXSD128, + ssa.OpAMD64VPMAXSD256, + ssa.OpAMD64VPMAXSB128, + ssa.OpAMD64VPMAXSB256, + ssa.OpAMD64VPMAXUW256, + ssa.OpAMD64VPMAXUW128, + ssa.OpAMD64VPMAXUD128, + ssa.OpAMD64VPMAXUD256, + ssa.OpAMD64VPMAXUB128, + ssa.OpAMD64VPMAXUB256, + ssa.OpAMD64VMAXPS512, ssa.OpAMD64VMAXPD512, - ssa.OpAMD64VPAVGBMasked512, - ssa.OpAMD64VPHSUBW256, - ssa.OpAMD64VPANDNDMasked256, - ssa.OpAMD64VPMAXUWMasked128, - ssa.OpAMD64VXORPS128, + ssa.OpAMD64VPMAXSW512, + ssa.OpAMD64VPMAXSD512, + ssa.OpAMD64VPMAXSQ128, + ssa.OpAMD64VPMAXSQ256, + ssa.OpAMD64VPMAXSQ512, + ssa.OpAMD64VPMAXSB512, + ssa.OpAMD64VPMAXUW512, + ssa.OpAMD64VPMAXUD512, + ssa.OpAMD64VPMAXUQ128, + ssa.OpAMD64VPMAXUQ256, + ssa.OpAMD64VPMAXUQ512, + ssa.OpAMD64VPMAXUB512, ssa.OpAMD64VMINPS128, - ssa.OpAMD64VADDPDMasked128, - ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VPORQMasked256, - ssa.OpAMD64VPMULHW128, - ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VPSUBDMasked512, - ssa.OpAMD64VPSUBQ512, - ssa.OpAMD64VPADDB128, - ssa.OpAMD64VPSUBSB256, - ssa.OpAMD64VDIVPSMasked512, - ssa.OpAMD64VSCALEFPS128, - ssa.OpAMD64VSQRTPS512, - ssa.OpAMD64VPSIGND128, - ssa.OpAMD64VRSQRT14PD512, - ssa.OpAMD64VDIVPDMasked128, - ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VPMINSWMasked512, - ssa.OpAMD64VPXORD512, - ssa.OpAMD64VHADDPD256, - ssa.OpAMD64VPMAXSB128, - ssa.OpAMD64VPHSUBD128, - ssa.OpAMD64VPANDDMasked256, - ssa.OpAMD64VRCP14PSMasked128, - ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VPSUBD512, - ssa.OpAMD64VANDNPD128, - ssa.OpAMD64VPHADDD256, + ssa.OpAMD64VMINPS256, + ssa.OpAMD64VMINPD128, + ssa.OpAMD64VMINPD256, + ssa.OpAMD64VPMINSW256, + ssa.OpAMD64VPMINSW128, + ssa.OpAMD64VPMINSD128, + ssa.OpAMD64VPMINSD256, + ssa.OpAMD64VPMINSB128, + ssa.OpAMD64VPMINSB256, + ssa.OpAMD64VPMINUW256, + ssa.OpAMD64VPMINUW128, + ssa.OpAMD64VPMINUD128, + ssa.OpAMD64VPMINUD256, + ssa.OpAMD64VPMINUB128, + ssa.OpAMD64VPMINUB256, ssa.OpAMD64VMINPS512, + ssa.OpAMD64VMINPD512, + ssa.OpAMD64VPMINSW512, + ssa.OpAMD64VPMINSD512, + ssa.OpAMD64VPMINSQ128, + ssa.OpAMD64VPMINSQ256, + ssa.OpAMD64VPMINSQ512, + ssa.OpAMD64VPMINSB512, + ssa.OpAMD64VPMINUW512, + ssa.OpAMD64VPMINUD512, + ssa.OpAMD64VPMINUQ128, + 
ssa.OpAMD64VPMINUQ256, + ssa.OpAMD64VPMINUQ512, + ssa.OpAMD64VPMINUB512, + ssa.OpAMD64VMULPS128, + ssa.OpAMD64VMULPS256, + ssa.OpAMD64VMULPD128, + ssa.OpAMD64VMULPD256, + ssa.OpAMD64VMULPS512, + ssa.OpAMD64VMULPD512, + ssa.OpAMD64VSCALEFPS512, + ssa.OpAMD64VSCALEFPS128, + ssa.OpAMD64VSCALEFPS256, + ssa.OpAMD64VSCALEFPD128, + ssa.OpAMD64VSCALEFPD256, + ssa.OpAMD64VSCALEFPD512, + ssa.OpAMD64VPMULDQ128, + ssa.OpAMD64VPMULDQ256, + ssa.OpAMD64VPMULUDQ128, + ssa.OpAMD64VPMULUDQ256, ssa.OpAMD64VPMULDQ512, - ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VPORDMasked256, - ssa.OpAMD64VPABSW256, - ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPADDDMasked256, - ssa.OpAMD64VPSUBQMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VPCMPEQB128, + ssa.OpAMD64VPMULUDQ512, + ssa.OpAMD64VPMULHW256, + ssa.OpAMD64VPMULHW128, + ssa.OpAMD64VPMULHUW256, + ssa.OpAMD64VPMULHUW128, + ssa.OpAMD64VPMULHW512, + ssa.OpAMD64VPMULHUW512, ssa.OpAMD64VPMULLW256, - ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VPMAXUDMasked128, - ssa.OpAMD64VPADDB256, - ssa.OpAMD64VPABSWMasked256, - ssa.OpAMD64VPOPCNTWMasked512, - ssa.OpAMD64VPSUBD128, - ssa.OpAMD64VPXORQMasked512, - ssa.OpAMD64VMINPDMasked256, - ssa.OpAMD64VPADDWMasked256, - ssa.OpAMD64VPMAXSQ256, - ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VMULPS512, - ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPMULLW128, + ssa.OpAMD64VPMULLD128, + ssa.OpAMD64VPMULLD256, + ssa.OpAMD64VPMULLW512, + ssa.OpAMD64VPMULLD512, + ssa.OpAMD64VPMULLQ128, + ssa.OpAMD64VPMULLQ256, + ssa.OpAMD64VPMULLQ512, + ssa.OpAMD64VORPS128, + ssa.OpAMD64VORPS256, + ssa.OpAMD64VORPD128, + ssa.OpAMD64VORPD256, + ssa.OpAMD64VPOR256, + ssa.OpAMD64VPOR128, + ssa.OpAMD64VORPS512, + ssa.OpAMD64VORPD512, + ssa.OpAMD64VPORD512, + ssa.OpAMD64VPORQ512, + ssa.OpAMD64VHADDPS128, + ssa.OpAMD64VHADDPS256, + ssa.OpAMD64VHADDPD128, + ssa.OpAMD64VHADDPD256, + ssa.OpAMD64VPHADDW256, + ssa.OpAMD64VPHADDW128, + ssa.OpAMD64VPHADDD128, + ssa.OpAMD64VPHADDD256, + ssa.OpAMD64VHSUBPS128, + ssa.OpAMD64VHSUBPS256, + ssa.OpAMD64VHSUBPD128, + ssa.OpAMD64VHSUBPD256, + ssa.OpAMD64VPHSUBW256, + ssa.OpAMD64VPHSUBW128, + ssa.OpAMD64VPHSUBD128, + ssa.OpAMD64VPHSUBD256, + ssa.OpAMD64VPADDSW256, + ssa.OpAMD64VPADDSW128, ssa.OpAMD64VPADDSB128, - ssa.OpAMD64VMINPD512, - ssa.OpAMD64VPMAXSD512, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPOPCNTQMasked128, - ssa.OpAMD64VPMINUB256, - ssa.OpAMD64VPXORDMasked256, - ssa.OpAMD64VPABSB256, - ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPOPCNTQ128, - ssa.OpAMD64VPMINSD256, - ssa.OpAMD64VPOPCNTDMasked512, - ssa.OpAMD64VPMINSWMasked128, - ssa.OpAMD64VPOPCNTD256, - ssa.OpAMD64VPXORDMasked512, - ssa.OpAMD64VPABSQ256, - ssa.OpAMD64VPOPCNTW256, - ssa.OpAMD64VDIVPS256, + ssa.OpAMD64VPADDSB256, + ssa.OpAMD64VPADDSW512, + ssa.OpAMD64VPADDSB512, + ssa.OpAMD64VPHADDSW256, ssa.OpAMD64VPHADDSW128, + ssa.OpAMD64VPHSUBSW256, + ssa.OpAMD64VPHSUBSW128, + ssa.OpAMD64VPSUBSW256, + ssa.OpAMD64VPSUBSW128, + ssa.OpAMD64VPSUBSB128, + ssa.OpAMD64VPSUBSB256, + ssa.OpAMD64VPSUBSW512, + ssa.OpAMD64VPSUBSB512, + ssa.OpAMD64VPSIGNW256, + ssa.OpAMD64VPSIGNW128, + ssa.OpAMD64VPSIGND128, + ssa.OpAMD64VPSIGND256, + ssa.OpAMD64VPSIGNB128, + ssa.OpAMD64VPSIGNB256, + ssa.OpAMD64VPSUBW256, + ssa.OpAMD64VPSUBW128, + ssa.OpAMD64VPSUBD128, ssa.OpAMD64VPSUBD256, - ssa.OpAMD64VRSQRT14PD128, - ssa.OpAMD64VDIVPD128, - ssa.OpAMD64VPCMPEQQMasked512, - ssa.OpAMD64VRCP14PDMasked256, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VMAXPSMasked256, - ssa.OpAMD64VPMULLQ256, - ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VPSUBQ128, + ssa.OpAMD64VPSUBQ256, + 
ssa.OpAMD64VPSUBB128, + ssa.OpAMD64VPSUBB256, + ssa.OpAMD64VPSUBW512, + ssa.OpAMD64VPSUBD512, + ssa.OpAMD64VPSUBQ512, + ssa.OpAMD64VPSUBB512, + ssa.OpAMD64VXORPS128, + ssa.OpAMD64VXORPS256, + ssa.OpAMD64VXORPD128, + ssa.OpAMD64VXORPD256, + ssa.OpAMD64VPXOR256, + ssa.OpAMD64VPXOR128, + ssa.OpAMD64VXORPS512, + ssa.OpAMD64VXORPD512, + ssa.OpAMD64VPXORD512, + ssa.OpAMD64VPXORQ512: + p = simdFp21(s, v) + + case ssa.OpAMD64VPCMPEQW512, + ssa.OpAMD64VPCMPEQD512, + ssa.OpAMD64VPCMPEQQ512, + ssa.OpAMD64VPCMPEQB512, + ssa.OpAMD64VPCMPGTW512, + ssa.OpAMD64VPCMPGTD512, + ssa.OpAMD64VPCMPGTQ128, + ssa.OpAMD64VPCMPGTQ512, + ssa.OpAMD64VPCMPGTB512: + p = simdFp2k1(s, v) + + case ssa.OpAMD64VADDPSMasked512, + ssa.OpAMD64VADDPSMasked128, + ssa.OpAMD64VADDPSMasked256, + ssa.OpAMD64VADDPDMasked128, + ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPADDWMasked256, + ssa.OpAMD64VPADDWMasked512, ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VPCMPGTWMasked512, - ssa.OpAMD64VPOR128, + ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VPADDDMasked128, + ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPADDQMasked128, + ssa.OpAMD64VPADDQMasked256, + ssa.OpAMD64VPADDQMasked512, + ssa.OpAMD64VPADDBMasked128, + ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPADDBMasked512, + ssa.OpAMD64VANDPSMasked512, + ssa.OpAMD64VANDPSMasked128, + ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VANDPDMasked128, + ssa.OpAMD64VANDPDMasked256, + ssa.OpAMD64VANDPDMasked512, + ssa.OpAMD64VPANDDMasked512, + ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VPANDQMasked128, + ssa.OpAMD64VPANDQMasked256, + ssa.OpAMD64VPANDQMasked512, + ssa.OpAMD64VANDNPSMasked512, + ssa.OpAMD64VANDNPSMasked128, + ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VANDNPDMasked128, + ssa.OpAMD64VANDNPDMasked256, ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VPANDNDMasked512, ssa.OpAMD64VPANDNDMasked128, - ssa.OpAMD64VPMINUDMasked128, - ssa.OpAMD64VXORPD128, + ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPANDNQMasked128, + ssa.OpAMD64VPANDNQMasked256, + ssa.OpAMD64VPANDNQMasked512, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPAVGWMasked512, ssa.OpAMD64VPAVGWMasked128, - ssa.OpAMD64VPMULLDMasked256, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VRSQRT14PSMasked128, - ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VPMULLW128, - ssa.OpAMD64VPSUBW128, - ssa.OpAMD64VPXOR256, + ssa.OpAMD64VPAVGBMasked128, + ssa.OpAMD64VPAVGBMasked256, + ssa.OpAMD64VPAVGBMasked512, + ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VDIVPSMasked128, ssa.OpAMD64VDIVPSMasked256, - ssa.OpAMD64VPOPCNTBMasked256, - ssa.OpAMD64VPADDD128, - ssa.OpAMD64VRSQRTPS128, - ssa.OpAMD64VHADDPD128, - ssa.OpAMD64VPSUBDMasked256, - ssa.OpAMD64VPOPCNTD512, - ssa.OpAMD64VPANDNQ512, - ssa.OpAMD64VPAVGWMasked512, - ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VMINPSMasked512, - ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VSQRTPSMasked512, - ssa.OpAMD64VPMINSDMasked256, - ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPSUBWMasked256, - ssa.OpAMD64VSQRTPD512, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPANDDMasked512, - ssa.OpAMD64VRCP14PD512, - ssa.OpAMD64VPABSWMasked512, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBQMasked128, - ssa.OpAMD64VPABSD128, - ssa.OpAMD64VPMINUD256, - ssa.OpAMD64VPMINUBMasked512, - ssa.OpAMD64VPSUBQ256, - ssa.OpAMD64VPMINSD128, - ssa.OpAMD64VPADDSB256, - ssa.OpAMD64VPMULLDMasked512, - ssa.OpAMD64VANDPD512, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VPHADDSW256, - ssa.OpAMD64VPAND256, 
- ssa.OpAMD64VADDPS512, - ssa.OpAMD64VPMINUQ256, - ssa.OpAMD64VADDPD256, - ssa.OpAMD64VPABSB128, - ssa.OpAMD64VPANDND512, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPMAXSW256, - ssa.OpAMD64VMAXPD256, - ssa.OpAMD64VMULPD128, - ssa.OpAMD64VPOPCNTQMasked256, - ssa.OpAMD64VPADDD256, - ssa.OpAMD64VPOPCNTQ512, - ssa.OpAMD64VMINPD256, - ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPOPCNTBMasked512, - ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VPMINSW512, - ssa.OpAMD64VPORDMasked512, - ssa.OpAMD64VPAVGB128, - ssa.OpAMD64VADDPD512, - ssa.OpAMD64VPMULHW512, - ssa.OpAMD64VPADDQ256, - ssa.OpAMD64VPMINUQ512, - ssa.OpAMD64VORPS512, - ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXSDMasked128, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VSCALEFPD128, - ssa.OpAMD64VPCMPGTW256, - ssa.OpAMD64VPMAXUW256, - ssa.OpAMD64VPMAXUD512, - ssa.OpAMD64VPMAXUQ256, - ssa.OpAMD64VPMINUDMasked256, - ssa.OpAMD64VPABSBMasked512, - ssa.OpAMD64VSQRTPD128, - ssa.OpAMD64VPANDNDMasked512, - ssa.OpAMD64VPMINUDMasked512, - ssa.OpAMD64VPABSBMasked256, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VMAXPSMasked512, - ssa.OpAMD64VPMINUD512, - ssa.OpAMD64VPABSBMasked128, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPMINSDMasked512, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VADDPSMasked128, - ssa.OpAMD64VPMAXSB512, - ssa.OpAMD64VPABSB512, - ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VADDPS128, - ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VANDNPS256, - ssa.OpAMD64VPMAXSB256, + ssa.OpAMD64VDIVPDMasked128, ssa.OpAMD64VDIVPDMasked256, - ssa.OpAMD64VPHSUBW128, - ssa.OpAMD64VPADDQMasked256, - ssa.OpAMD64VPMAXSD256, - ssa.OpAMD64VPABSDMasked512, - ssa.OpAMD64VPADDQ512, - ssa.OpAMD64VPMAXUDMasked256, - ssa.OpAMD64VPCMPGTB128, - ssa.OpAMD64VPMAXUQMasked128, - ssa.OpAMD64VPCMPGTQ128, - ssa.OpAMD64VPANDQ512, - ssa.OpAMD64VRCP14PSMasked512, - ssa.OpAMD64VANDPS256, - ssa.OpAMD64VPHSUBD256, - ssa.OpAMD64VPSUBW512, - ssa.OpAMD64VHADDPS256, - ssa.OpAMD64VMULPD256, - ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VPSUBDMasked128, - ssa.OpAMD64VPXORDMasked128, - ssa.OpAMD64VHSUBPD128, - ssa.OpAMD64VPMAXUW128, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VPHADDD128, - ssa.OpAMD64VPMINUD128, - ssa.OpAMD64VPSIGND256, - ssa.OpAMD64VPADDSW128, - ssa.OpAMD64VPMAXUQ512, - ssa.OpAMD64VPCMPGTQ512, - ssa.OpAMD64VADDPDMasked256, - ssa.OpAMD64VHSUBPD256, - ssa.OpAMD64VPCMPGTWMasked256, - ssa.OpAMD64VPOPCNTW128, - ssa.OpAMD64VPSUBSB512, - ssa.OpAMD64VRSQRT14PDMasked256, - ssa.OpAMD64VPMAXSD128, - ssa.OpAMD64VPADDQMasked128, - ssa.OpAMD64VPOPCNTQ256, - ssa.OpAMD64VPMAXSQ128, - ssa.OpAMD64VANDPD256, - ssa.OpAMD64VPSUBSBMasked128, - ssa.OpAMD64VSQRTPS128, - ssa.OpAMD64VPCMPGTQ256, - ssa.OpAMD64VPADDSBMasked128, - ssa.OpAMD64VXORPD512, - ssa.OpAMD64VPCMPGTWMasked128, - ssa.OpAMD64VPMULLQ128, - ssa.OpAMD64VPSUBWMasked128, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VPMAXUD256, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VPMINUWMasked512, - ssa.OpAMD64VRSQRTPS256, - ssa.OpAMD64VPXORQ512, + ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VMAXPSMasked512, + ssa.OpAMD64VMAXPSMasked128, + ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VMAXPDMasked128, + ssa.OpAMD64VMAXPDMasked256, + ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPMAXSWMasked256, + ssa.OpAMD64VPMAXSWMasked512, + ssa.OpAMD64VPMAXSWMasked128, + ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPMAXSDMasked128, ssa.OpAMD64VPMAXSDMasked256, - ssa.OpAMD64VPMINUWMasked256, - ssa.OpAMD64VPMINSQ256, - ssa.OpAMD64VPMULDQMasked256, - ssa.OpAMD64VPMULUDQ128, - ssa.OpAMD64VPMAXUB256, - ssa.OpAMD64VPMINUW256, - 
ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPADDSB512, - ssa.OpAMD64VPMINUQMasked128, - ssa.OpAMD64VPMULUDQ256, - ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VPADDDMasked128, - ssa.OpAMD64VPABSD256, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VPABSDMasked128, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPMULHUW512, - ssa.OpAMD64VPSUBQ128, - ssa.OpAMD64VPADDDMasked512, - ssa.OpAMD64VRCP14PS128, - ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VPMULHUWMasked256, - ssa.OpAMD64VPORDMasked128, - ssa.OpAMD64VSCALEFPDMasked512, - ssa.OpAMD64VSCALEFPD512, + ssa.OpAMD64VPMAXSQMasked128, + ssa.OpAMD64VPMAXSQMasked256, + ssa.OpAMD64VPMAXSQMasked512, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSBMasked256, ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMAXUWMasked256, + ssa.OpAMD64VPMAXUWMasked512, + ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VPMAXUDMasked512, + ssa.OpAMD64VPMAXUDMasked128, + ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VPMAXUQMasked256, + ssa.OpAMD64VPMAXUQMasked512, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VMINPSMasked512, + ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VMINPDMasked128, + ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VPMINSWMasked256, + ssa.OpAMD64VPMINSWMasked512, + ssa.OpAMD64VPMINSWMasked128, + ssa.OpAMD64VPMINSDMasked512, + ssa.OpAMD64VPMINSDMasked128, + ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VPMINSQMasked128, ssa.OpAMD64VPMINSQMasked256, - ssa.OpAMD64VXORPS512, - ssa.OpAMD64VPSUBSB128, - ssa.OpAMD64VPMAXSW128, + ssa.OpAMD64VPMINSQMasked512, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPMINSBMasked256, ssa.OpAMD64VPMINSBMasked512, - ssa.OpAMD64VADDPDMasked512, - ssa.OpAMD64VPSUBB512, - ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VRSQRT14PSMasked256, - ssa.OpAMD64VPOPCNTBMasked128, - ssa.OpAMD64VPOPCNTD128, - ssa.OpAMD64VPMAXUQMasked256, - ssa.OpAMD64VPMINSB256, - ssa.OpAMD64VPABSQMasked512, - ssa.OpAMD64VDIVPSMasked128, - ssa.OpAMD64VPAVGBMasked128, - ssa.OpAMD64VPCMPEQW512, - ssa.OpAMD64VPMULHUW256, - ssa.OpAMD64VPMULLW512, - ssa.OpAMD64VPADDB512, - ssa.OpAMD64VPOPCNTB512, - ssa.OpAMD64VPORD512, - ssa.OpAMD64VMAXPDMasked128, - ssa.OpAMD64VPMAXSW512, - ssa.OpAMD64VPMINUW512, - ssa.OpAMD64VADDPSMasked256, - ssa.OpAMD64VPCMPGTQMasked512, - ssa.OpAMD64VRCP14PD128, - ssa.OpAMD64VPHSUBSW256, - ssa.OpAMD64VRSQRT14PSMasked512, - ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VMAXPS256, - ssa.OpAMD64VPANDQMasked128, - ssa.OpAMD64VPCMPEQWMasked512, - ssa.OpAMD64VRCP14PDMasked128, - ssa.OpAMD64VADDPSMasked512, - ssa.OpAMD64VPMAXSQMasked128, - ssa.OpAMD64VPOPCNTDMasked128, - ssa.OpAMD64VPHADDW256, - ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VRCP14PS256, - ssa.OpAMD64VPSIGNB256, - ssa.OpAMD64VPSUBSW256, - ssa.OpAMD64VDIVPD512, - ssa.OpAMD64VPADDW128, - ssa.OpAMD64VXORPS256, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VPAVGB512, - ssa.OpAMD64VPMAXUW512, - ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VPSIGNW256, - ssa.OpAMD64VSQRTPSMasked128, - ssa.OpAMD64VPCMPEQQMasked128, - ssa.OpAMD64VPOPCNTWMasked256, - ssa.OpAMD64VPCMPGTD128, - ssa.OpAMD64VMAXPDMasked256, - ssa.OpAMD64VPANDNQMasked256, - ssa.OpAMD64VPMINSB512, - ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VPMINUW128, - ssa.OpAMD64VMINPDMasked512, - ssa.OpAMD64VMAXPSMasked128, - ssa.OpAMD64VPMAXSBMasked256, - ssa.OpAMD64VPANDDMasked128, - ssa.OpAMD64VSQRTPD256, - ssa.OpAMD64VPCMPGTD256, - ssa.OpAMD64VPOPCNTQMasked512, - ssa.OpAMD64VPMAXUB512, 
- ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VPANDNQMasked128, - ssa.OpAMD64VSCALEFPS256, - ssa.OpAMD64VPCMPEQQMasked256, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VORPS128, - ssa.OpAMD64VPMINUB128, - ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPAVGB256, - ssa.OpAMD64VPMINSQ128, - ssa.OpAMD64VPCMPEQQ256, - ssa.OpAMD64VMULPD512, - ssa.OpAMD64VPABSQ512, - ssa.OpAMD64VPABSDMasked256, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPSIGNW128, - ssa.OpAMD64VPABSQ128, - ssa.OpAMD64VPMINUQMasked256, - ssa.OpAMD64VPMULHW256, - ssa.OpAMD64VSCALEFPS512, - ssa.OpAMD64VRSQRT14PDMasked512, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VADDPD128, - ssa.OpAMD64VSCALEFPD256, - ssa.OpAMD64VANDPS128, - ssa.OpAMD64VMULPS256, - ssa.OpAMD64VPMINSW128, - ssa.OpAMD64VPMAXSBMasked128, - ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VMINPS256, - ssa.OpAMD64VPMAXUQ128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VPAVGW256, - ssa.OpAMD64VMAXPD128, - ssa.OpAMD64VPSIGNB128, - ssa.OpAMD64VPMINUB512, - ssa.OpAMD64VPABSW128, - ssa.OpAMD64VPCMPGTW128, - ssa.OpAMD64VORPS256, - ssa.OpAMD64VPMINSB128, - ssa.OpAMD64VPMINUQ128, - ssa.OpAMD64VPMINSQ512, - ssa.OpAMD64VSQRTPDMasked512, - ssa.OpAMD64VPMINSWMasked256, - ssa.OpAMD64VMINPSMasked256, - ssa.OpAMD64VPMAXUBMasked128, - ssa.OpAMD64VPCMPEQB256, - ssa.OpAMD64VANDNPD512, - ssa.OpAMD64VPCMPGTQMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VORPD128, - ssa.OpAMD64VMAXPS512, - ssa.OpAMD64VPMULLD512, - ssa.OpAMD64VMINPDMasked128, - ssa.OpAMD64VANDNPS128, - ssa.OpAMD64VMULPS128, - ssa.OpAMD64VPMULLQ512, - ssa.OpAMD64VRSQRT14PS512, - ssa.OpAMD64VMINPSMasked128, - ssa.OpAMD64VRCP14PSMasked256, - ssa.OpAMD64VPMAXUD128, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPCMPEQQ512, - ssa.OpAMD64VSQRTPS256, - ssa.OpAMD64VPMULHWMasked256, - ssa.OpAMD64VSQRTPSMasked256, - ssa.OpAMD64VDIVPS128, - ssa.OpAMD64VRCP14PD256, - ssa.OpAMD64VPMAXUDMasked512, - ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VANDPSMasked128, - ssa.OpAMD64VPABSQMasked128, - ssa.OpAMD64VPMAXSQMasked256, - ssa.OpAMD64VPAVGBMasked256, - ssa.OpAMD64VPCMPEQWMasked256, - ssa.OpAMD64VSCALEFPSMasked256, - ssa.OpAMD64VPAVGW512: - p.From.Type = obj.TYPE_REG - p.From.Reg = simdReg(v.Args[0]) - - default: - // At least one arg is required. 
- return false - } - - // Second arg - switch v.Op { - // Registers - case ssa.OpAMD64VPMINSW256, - ssa.OpAMD64VPMULLD256, - ssa.OpAMD64VORPD512, - ssa.OpAMD64VPCMPUBMasked512, - ssa.OpAMD64VPCMPUQ256, - ssa.OpAMD64VPMINSDMasked128, - ssa.OpAMD64VDIVPD256, - ssa.OpAMD64VPCMPEQW256, - ssa.OpAMD64VPCMPBMasked128, - ssa.OpAMD64VPCMPUWMasked128, - ssa.OpAMD64VHADDPS128, - ssa.OpAMD64VPXOR128, - ssa.OpAMD64VPADDQ128, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VPANDQMasked256, - ssa.OpAMD64VPCMPBMasked512, - ssa.OpAMD64VSQRTPDMasked128, - ssa.OpAMD64VPMAXUB128, - ssa.OpAMD64VPSUBW256, - ssa.OpAMD64VPMAXSQ512, - ssa.OpAMD64VANDNPS512, - ssa.OpAMD64VPCMPUWMasked512, - ssa.OpAMD64VPMULHUWMasked512, - ssa.OpAMD64VPCMPD128, - ssa.OpAMD64VPCMPDMasked256, - ssa.OpAMD64VPHADDW128, - ssa.OpAMD64VPCMPGTQMasked256, - ssa.OpAMD64VPMULLQMasked256, - ssa.OpAMD64VCMPPD128, - ssa.OpAMD64VCMPPS256, - ssa.OpAMD64VPADDW512, - ssa.OpAMD64VPMULLDMasked128, - ssa.OpAMD64VPCMPEQQ128, - ssa.OpAMD64VPAVGW128, - ssa.OpAMD64VPOR256, - ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPCMPDMasked128, - ssa.OpAMD64VPCMPUW256, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMULLWMasked128, - ssa.OpAMD64VORPD256, - ssa.OpAMD64VPANDN256, - ssa.OpAMD64VPANDD512, - ssa.OpAMD64VCMPPSMasked256, - ssa.OpAMD64VPCMPDMasked512, - ssa.OpAMD64VPORQ512, - ssa.OpAMD64VPCMPQ256, - ssa.OpAMD64VPMULLWMasked256, - ssa.OpAMD64VPSUBSW128, - ssa.OpAMD64VPCMPQMasked512, - ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VPCMPUD128, - ssa.OpAMD64VPCMPGTB256, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VPCMPUW128, - ssa.OpAMD64VPCMPEQD256, - ssa.OpAMD64VPSUBSW512, - ssa.OpAMD64VPADDD512, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPCMPBMasked256, - ssa.OpAMD64VPMULUDQ512, - ssa.OpAMD64VPADDSW512, - ssa.OpAMD64VPCMPB256, - ssa.OpAMD64VPCMPUWMasked256, - ssa.OpAMD64VPORQMasked128, - ssa.OpAMD64VANDPD128, - ssa.OpAMD64VPCMPEQD128, - ssa.OpAMD64VPHSUBSW128, - ssa.OpAMD64VPADDSW256, - ssa.OpAMD64VPCMPUDMasked512, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VPMULHUW128, - ssa.OpAMD64VPCMPGTW512, - ssa.OpAMD64VPCMPWMasked128, - ssa.OpAMD64VSQRTPDMasked256, - ssa.OpAMD64VCMPPD256, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPMINSD512, - ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VPOPCNTWMasked128, - ssa.OpAMD64VCMPPDMasked256, - ssa.OpAMD64VMAXPS128, - ssa.OpAMD64VPMULLD128, - ssa.OpAMD64VPSUBB256, - ssa.OpAMD64VMINPD128, - ssa.OpAMD64VPANDNQMasked512, - ssa.OpAMD64VANDPS512, - ssa.OpAMD64VPCMPQMasked256, - ssa.OpAMD64VPOPCNTDMasked256, - ssa.OpAMD64VANDNPD256, - ssa.OpAMD64VPAND128, - ssa.OpAMD64VPANDN128, - ssa.OpAMD64VXORPD256, - ssa.OpAMD64VMAXPDMasked512, - ssa.OpAMD64VPCMPUQ512, - ssa.OpAMD64VPCMPUW512, - ssa.OpAMD64VPMAXUWMasked256, - ssa.OpAMD64VCMPPSMasked512, - ssa.OpAMD64VPMINSQMasked128, - ssa.OpAMD64VPMULDQ256, - ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPXORQMasked128, - ssa.OpAMD64VPCMPEQW128, - ssa.OpAMD64VPABSWMasked128, - ssa.OpAMD64VPMAXSWMasked256, - ssa.OpAMD64VPMULDQ128, - ssa.OpAMD64VPMULLWMasked512, - ssa.OpAMD64VDIVPS512, - ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VPCMPQMasked128, - ssa.OpAMD64VPCMPUBMasked256, - ssa.OpAMD64VRSQRT14PDMasked128, - ssa.OpAMD64VPCMPUD512, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VPADDW256, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VPCMPUQ128, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VCMPPS128, - ssa.OpAMD64VPCMPEQWMasked128, - ssa.OpAMD64VHSUBPS128, - ssa.OpAMD64VPMAXSDMasked512, - ssa.OpAMD64VPABSQMasked256, - ssa.OpAMD64VADDPS256, - 
ssa.OpAMD64VHSUBPS256, - ssa.OpAMD64VPSUBB128, - ssa.OpAMD64VPCMPUB128, - ssa.OpAMD64VMAXPD512, - ssa.OpAMD64VPAVGBMasked512, - ssa.OpAMD64VPCMPUQMasked128, - ssa.OpAMD64VPHSUBW256, - ssa.OpAMD64VPANDNDMasked256, - ssa.OpAMD64VPMAXUWMasked128, - ssa.OpAMD64VXORPS128, - ssa.OpAMD64VMINPS128, - ssa.OpAMD64VADDPDMasked128, - ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VPORQMasked256, - ssa.OpAMD64VPMULHW128, - ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VPSUBDMasked512, - ssa.OpAMD64VPSUBQ512, - ssa.OpAMD64VPADDB128, - ssa.OpAMD64VPSUBSB256, - ssa.OpAMD64VDIVPSMasked512, - ssa.OpAMD64VPCMPUB512, - ssa.OpAMD64VSCALEFPS128, - ssa.OpAMD64VPSIGND128, - ssa.OpAMD64VDIVPDMasked128, - ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VPCMPUB256, - ssa.OpAMD64VPMINSWMasked512, - ssa.OpAMD64VPXORD512, - ssa.OpAMD64VHADDPD256, - ssa.OpAMD64VPMAXSB128, - ssa.OpAMD64VPHSUBD128, - ssa.OpAMD64VPCMPUQMasked512, - ssa.OpAMD64VPANDDMasked256, - ssa.OpAMD64VPCMPUDMasked256, - ssa.OpAMD64VRCP14PSMasked128, - ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VPSUBD512, - ssa.OpAMD64VANDNPD128, - ssa.OpAMD64VPHADDD256, - ssa.OpAMD64VMINPS512, - ssa.OpAMD64VPMULDQ512, - ssa.OpAMD64VCMPPSMasked128, - ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VPCMPB128, - ssa.OpAMD64VPORDMasked256, - ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPADDDMasked256, - ssa.OpAMD64VPSUBQMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VPCMPEQB128, - ssa.OpAMD64VPMULLW256, - ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VPMAXUDMasked128, - ssa.OpAMD64VPADDB256, - ssa.OpAMD64VPABSWMasked256, - ssa.OpAMD64VPOPCNTWMasked512, - ssa.OpAMD64VPSUBD128, - ssa.OpAMD64VPXORQMasked512, - ssa.OpAMD64VMINPDMasked256, - ssa.OpAMD64VPADDWMasked256, - ssa.OpAMD64VPMAXSQ256, - ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VMULPS512, - ssa.OpAMD64VPCMPUD256, - ssa.OpAMD64VPMULLQMasked128, - ssa.OpAMD64VPADDSB128, - ssa.OpAMD64VMINPD512, - ssa.OpAMD64VPMAXSD512, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPOPCNTQMasked128, - ssa.OpAMD64VPCMPQ512, - ssa.OpAMD64VPMINUB256, - ssa.OpAMD64VPCMPWMasked512, - ssa.OpAMD64VPXORDMasked256, - ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPMINSD256, - ssa.OpAMD64VPOPCNTDMasked512, - ssa.OpAMD64VPMINSWMasked128, - ssa.OpAMD64VPXORDMasked512, - ssa.OpAMD64VDIVPS256, - ssa.OpAMD64VPHADDSW128, - ssa.OpAMD64VPSUBD256, - ssa.OpAMD64VDIVPD128, - ssa.OpAMD64VPCMPUDMasked128, - ssa.OpAMD64VPCMPEQQMasked512, - ssa.OpAMD64VRCP14PDMasked256, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VMAXPSMasked256, - ssa.OpAMD64VPMULLQ256, - ssa.OpAMD64VANDNPDMasked256, - ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VCMPPDMasked128, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VPCMPGTWMasked512, - ssa.OpAMD64VPOR128, - ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPSUBBMasked512, - ssa.OpAMD64VPANDNDMasked128, - ssa.OpAMD64VPCMPQ128, - ssa.OpAMD64VPMINUDMasked128, - ssa.OpAMD64VXORPD128, - ssa.OpAMD64VPAVGWMasked128, - ssa.OpAMD64VPCMPUQMasked256, - ssa.OpAMD64VPMULLDMasked256, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VRSQRT14PSMasked128, - ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VPMULLW128, - ssa.OpAMD64VPSUBW128, - ssa.OpAMD64VPXOR256, - ssa.OpAMD64VPCMPD512, - ssa.OpAMD64VDIVPSMasked256, - ssa.OpAMD64VPOPCNTBMasked256, - ssa.OpAMD64VPADDD128, - ssa.OpAMD64VHADDPD128, - ssa.OpAMD64VPSUBDMasked256, - ssa.OpAMD64VPANDNQ512, - ssa.OpAMD64VPAVGWMasked512, - ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VMINPSMasked512, - ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VSQRTPSMasked512, - ssa.OpAMD64VPCMPD256, - 
ssa.OpAMD64VPMINSDMasked256, - ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPSUBWMasked256, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPCMPB512, - ssa.OpAMD64VPANDDMasked512, - ssa.OpAMD64VPABSWMasked512, - ssa.OpAMD64VPCMPWMasked256, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBQMasked128, - ssa.OpAMD64VPCMPW256, - ssa.OpAMD64VPMINUD256, - ssa.OpAMD64VCMPPDMasked512, - ssa.OpAMD64VCMPPD512, - ssa.OpAMD64VPMINUBMasked512, - ssa.OpAMD64VPSUBQ256, - ssa.OpAMD64VPMINSD128, - ssa.OpAMD64VPADDSB256, - ssa.OpAMD64VPMULLDMasked512, - ssa.OpAMD64VANDPD512, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VPHADDSW256, - ssa.OpAMD64VPAND256, - ssa.OpAMD64VADDPS512, - ssa.OpAMD64VPMINUQ256, - ssa.OpAMD64VADDPD256, - ssa.OpAMD64VPANDND512, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPMAXSW256, - ssa.OpAMD64VMAXPD256, - ssa.OpAMD64VMULPD128, - ssa.OpAMD64VPOPCNTQMasked256, - ssa.OpAMD64VPADDD256, - ssa.OpAMD64VMINPD256, - ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPOPCNTBMasked512, - ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VPCMPUBMasked128, - ssa.OpAMD64VPMINSW512, - ssa.OpAMD64VPORDMasked512, - ssa.OpAMD64VPAVGB128, - ssa.OpAMD64VADDPD512, - ssa.OpAMD64VPMULHW512, - ssa.OpAMD64VPADDQ256, - ssa.OpAMD64VPMINUQ512, - ssa.OpAMD64VORPS512, - ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXSDMasked128, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VSCALEFPD128, - ssa.OpAMD64VPCMPGTW256, - ssa.OpAMD64VPMAXUW256, - ssa.OpAMD64VPMAXUD512, - ssa.OpAMD64VPMAXUQ256, - ssa.OpAMD64VPMINUDMasked256, - ssa.OpAMD64VPABSBMasked512, - ssa.OpAMD64VPANDNDMasked512, - ssa.OpAMD64VPMINUDMasked512, - ssa.OpAMD64VPABSBMasked256, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VMAXPSMasked512, - ssa.OpAMD64VPMINUD512, - ssa.OpAMD64VPABSBMasked128, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPMINSDMasked512, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VADDPSMasked128, - ssa.OpAMD64VPMAXSB512, - ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VADDPS128, - ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VANDNPS256, - ssa.OpAMD64VPMAXSB256, - ssa.OpAMD64VDIVPDMasked256, - ssa.OpAMD64VPHSUBW128, - ssa.OpAMD64VPADDQMasked256, - ssa.OpAMD64VPMAXSD256, - ssa.OpAMD64VPABSDMasked512, - ssa.OpAMD64VPADDQ512, - ssa.OpAMD64VPMAXUDMasked256, - ssa.OpAMD64VPCMPGTB128, - ssa.OpAMD64VPMAXUQMasked128, - ssa.OpAMD64VPCMPW128, - ssa.OpAMD64VPCMPGTQ128, - ssa.OpAMD64VPANDQ512, - ssa.OpAMD64VRCP14PSMasked512, - ssa.OpAMD64VANDPS256, - ssa.OpAMD64VPHSUBD256, - ssa.OpAMD64VPSUBW512, - ssa.OpAMD64VHADDPS256, - ssa.OpAMD64VMULPD256, - ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VPSUBDMasked128, - ssa.OpAMD64VPXORDMasked128, - ssa.OpAMD64VHSUBPD128, - ssa.OpAMD64VPMAXUW128, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VPHADDD128, - ssa.OpAMD64VPMINUD128, - ssa.OpAMD64VPSIGND256, - ssa.OpAMD64VPADDSW128, - ssa.OpAMD64VCMPPS512, - ssa.OpAMD64VPMAXUQ512, - ssa.OpAMD64VPCMPGTQ512, - ssa.OpAMD64VADDPDMasked256, - ssa.OpAMD64VHSUBPD256, - ssa.OpAMD64VPCMPGTWMasked256, - ssa.OpAMD64VPSUBSB512, - ssa.OpAMD64VRSQRT14PDMasked256, - ssa.OpAMD64VPMAXSD128, - ssa.OpAMD64VPADDQMasked128, - ssa.OpAMD64VPMAXSQ128, - ssa.OpAMD64VANDPD256, - ssa.OpAMD64VPSUBSBMasked128, - ssa.OpAMD64VPCMPGTQ256, - ssa.OpAMD64VPADDSBMasked128, - ssa.OpAMD64VXORPD512, - ssa.OpAMD64VPCMPGTWMasked128, - ssa.OpAMD64VPMULLQ128, - ssa.OpAMD64VPSUBWMasked128, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VPMAXUD256, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VPMINUWMasked512, - ssa.OpAMD64VPXORQ512, - ssa.OpAMD64VPMAXSDMasked256, - ssa.OpAMD64VPMINUWMasked256, - 
ssa.OpAMD64VPMINSQ256, - ssa.OpAMD64VPMULDQMasked256, - ssa.OpAMD64VPMULUDQ128, - ssa.OpAMD64VPMAXUB256, - ssa.OpAMD64VPMINUW256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPADDSB512, - ssa.OpAMD64VPMINUQMasked128, - ssa.OpAMD64VPMULUDQ256, - ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VPADDDMasked128, - ssa.OpAMD64VPCMPW512, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VPABSDMasked128, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPMULHUW512, - ssa.OpAMD64VPSUBQ128, - ssa.OpAMD64VPADDDMasked512, - ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VPMULHUWMasked256, - ssa.OpAMD64VPORDMasked128, - ssa.OpAMD64VSCALEFPDMasked512, - ssa.OpAMD64VSCALEFPD512, - ssa.OpAMD64VPMAXSBMasked512, - ssa.OpAMD64VPMINSQMasked256, - ssa.OpAMD64VXORPS512, - ssa.OpAMD64VPSUBSB128, - ssa.OpAMD64VPMAXSW128, - ssa.OpAMD64VPMINSBMasked512, - ssa.OpAMD64VADDPDMasked512, - ssa.OpAMD64VPSUBB512, - ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VRSQRT14PSMasked256, - ssa.OpAMD64VPOPCNTBMasked128, - ssa.OpAMD64VPMAXUQMasked256, - ssa.OpAMD64VPMINSB256, - ssa.OpAMD64VPABSQMasked512, - ssa.OpAMD64VDIVPSMasked128, - ssa.OpAMD64VPAVGBMasked128, - ssa.OpAMD64VPCMPEQW512, - ssa.OpAMD64VPMULHUW256, - ssa.OpAMD64VPMULLW512, - ssa.OpAMD64VPADDB512, - ssa.OpAMD64VPORD512, - ssa.OpAMD64VMAXPDMasked128, - ssa.OpAMD64VPMAXSW512, - ssa.OpAMD64VPMINUW512, - ssa.OpAMD64VADDPSMasked256, - ssa.OpAMD64VPCMPGTQMasked512, - ssa.OpAMD64VPHSUBSW256, - ssa.OpAMD64VRSQRT14PSMasked512, - ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VMAXPS256, - ssa.OpAMD64VPANDQMasked128, - ssa.OpAMD64VPCMPEQWMasked512, - ssa.OpAMD64VRCP14PDMasked128, - ssa.OpAMD64VADDPSMasked512, - ssa.OpAMD64VPMAXSQMasked128, - ssa.OpAMD64VPOPCNTDMasked128, - ssa.OpAMD64VPHADDW256, - ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VPSIGNB256, - ssa.OpAMD64VPSUBSW256, - ssa.OpAMD64VDIVPD512, - ssa.OpAMD64VPADDW128, - ssa.OpAMD64VXORPS256, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VPAVGB512, - ssa.OpAMD64VPMAXUW512, - ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VPSIGNW256, - ssa.OpAMD64VSQRTPSMasked128, - ssa.OpAMD64VPCMPEQQMasked128, - ssa.OpAMD64VPOPCNTWMasked256, - ssa.OpAMD64VPCMPGTD128, - ssa.OpAMD64VMAXPDMasked256, - ssa.OpAMD64VPANDNQMasked256, - ssa.OpAMD64VPMINSB512, - ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VPMINUW128, - ssa.OpAMD64VMINPDMasked512, - ssa.OpAMD64VMAXPSMasked128, - ssa.OpAMD64VPMAXSBMasked256, - ssa.OpAMD64VPANDDMasked128, - ssa.OpAMD64VPCMPGTD256, - ssa.OpAMD64VPOPCNTQMasked512, - ssa.OpAMD64VPMAXUB512, - ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VPANDNQMasked128, - ssa.OpAMD64VSCALEFPS256, - ssa.OpAMD64VPCMPEQQMasked256, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VORPS128, - ssa.OpAMD64VPMINUB128, - ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPAVGB256, - ssa.OpAMD64VPMINSQ128, - ssa.OpAMD64VPCMPEQQ256, - ssa.OpAMD64VMULPD512, - ssa.OpAMD64VPABSDMasked256, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPSIGNW128, - ssa.OpAMD64VPMINUQMasked256, - ssa.OpAMD64VPMULHW256, - ssa.OpAMD64VSCALEFPS512, - ssa.OpAMD64VRSQRT14PDMasked512, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VADDPD128, - ssa.OpAMD64VSCALEFPD256, - ssa.OpAMD64VANDPS128, - ssa.OpAMD64VMULPS256, - ssa.OpAMD64VPMINSW128, - ssa.OpAMD64VPMAXSBMasked128, - ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VMINPS256, - ssa.OpAMD64VPMAXUQ128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VPAVGW256, - ssa.OpAMD64VMAXPD128, - ssa.OpAMD64VPSIGNB128, - ssa.OpAMD64VPMINUB512, - ssa.OpAMD64VPCMPGTW128, - ssa.OpAMD64VORPS256, - ssa.OpAMD64VPMINSB128, - 
ssa.OpAMD64VPMINUQ128, - ssa.OpAMD64VPMINSQ512, - ssa.OpAMD64VSQRTPDMasked512, - ssa.OpAMD64VPMINSWMasked256, - ssa.OpAMD64VMINPSMasked256, - ssa.OpAMD64VPMAXUBMasked128, - ssa.OpAMD64VPCMPEQB256, - ssa.OpAMD64VANDNPD512, - ssa.OpAMD64VPCMPGTQMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VORPD128, - ssa.OpAMD64VMAXPS512, - ssa.OpAMD64VPMULLD512, - ssa.OpAMD64VMINPDMasked128, - ssa.OpAMD64VANDNPS128, - ssa.OpAMD64VMULPS128, - ssa.OpAMD64VPMULLQ512, - ssa.OpAMD64VMINPSMasked128, - ssa.OpAMD64VRCP14PSMasked256, - ssa.OpAMD64VPMAXUD128, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPCMPEQQ512, - ssa.OpAMD64VPMULHWMasked256, - ssa.OpAMD64VSQRTPSMasked256, - ssa.OpAMD64VDIVPS128, - ssa.OpAMD64VPMAXUDMasked512, - ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VANDPSMasked128, - ssa.OpAMD64VPABSQMasked128, - ssa.OpAMD64VPMAXSQMasked256, - ssa.OpAMD64VPAVGBMasked256, - ssa.OpAMD64VPCMPEQWMasked256, - ssa.OpAMD64VSCALEFPSMasked256, - ssa.OpAMD64VPAVGW512: - if p.From.Type == obj.TYPE_CONST { - p.AddRestSourceReg(simdReg(v.Args[0])) - } else { - p.AddRestSourceReg(simdReg(v.Args[1])) - } - } - - // Third arg - switch v.Op { - // Registers - case ssa.OpAMD64VPCMPUBMasked512, - ssa.OpAMD64VPCMPUQ256, - ssa.OpAMD64VPMINSDMasked128, - ssa.OpAMD64VPCMPBMasked128, - ssa.OpAMD64VPCMPUWMasked128, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VPANDQMasked256, - ssa.OpAMD64VPCMPBMasked512, - ssa.OpAMD64VPCMPUWMasked512, - ssa.OpAMD64VPMULHUWMasked512, - ssa.OpAMD64VPCMPD128, - ssa.OpAMD64VPCMPDMasked256, - ssa.OpAMD64VPCMPGTQMasked256, - ssa.OpAMD64VPMULLQMasked256, - ssa.OpAMD64VCMPPD128, - ssa.OpAMD64VCMPPS256, - ssa.OpAMD64VPMULLDMasked128, - ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPCMPDMasked128, - ssa.OpAMD64VPCMPUW256, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMULLWMasked128, - ssa.OpAMD64VCMPPSMasked256, - ssa.OpAMD64VPCMPDMasked512, - ssa.OpAMD64VPCMPQ256, - ssa.OpAMD64VPMULLWMasked256, - ssa.OpAMD64VPCMPQMasked512, - ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VPCMPUD128, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VPCMPUW128, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPCMPBMasked256, - ssa.OpAMD64VPCMPB256, - ssa.OpAMD64VPCMPUWMasked256, - ssa.OpAMD64VPORQMasked128, - ssa.OpAMD64VPCMPUDMasked512, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VPCMPWMasked128, - ssa.OpAMD64VCMPPD256, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VCMPPDMasked256, - ssa.OpAMD64VPANDNQMasked512, - ssa.OpAMD64VPCMPQMasked256, - ssa.OpAMD64VMAXPDMasked512, - ssa.OpAMD64VPCMPUQ512, - ssa.OpAMD64VPCMPUW512, - ssa.OpAMD64VPMAXUWMasked256, - ssa.OpAMD64VCMPPSMasked512, - ssa.OpAMD64VPMINSQMasked128, - ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPXORQMasked128, - ssa.OpAMD64VPMAXSWMasked256, - ssa.OpAMD64VPMULLWMasked512, - ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VPCMPQMasked128, - ssa.OpAMD64VPCMPUBMasked256, - ssa.OpAMD64VPCMPUD512, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VPCMPUQ128, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VCMPPS128, - ssa.OpAMD64VPCMPEQWMasked128, - ssa.OpAMD64VPMAXSDMasked512, - ssa.OpAMD64VPCMPUB128, - ssa.OpAMD64VPAVGBMasked512, - ssa.OpAMD64VPCMPUQMasked128, - ssa.OpAMD64VPANDNDMasked256, - ssa.OpAMD64VPMAXUWMasked128, - ssa.OpAMD64VADDPDMasked128, - ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VPORQMasked256, - ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VPSUBDMasked512, - ssa.OpAMD64VDIVPSMasked512, - ssa.OpAMD64VPCMPUB512, - ssa.OpAMD64VDIVPDMasked128, - 
ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VPCMPUB256, - ssa.OpAMD64VPMINSWMasked512, - ssa.OpAMD64VPCMPUQMasked512, - ssa.OpAMD64VPANDDMasked256, - ssa.OpAMD64VPCMPUDMasked256, - ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VCMPPSMasked128, - ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VPCMPB128, - ssa.OpAMD64VPORDMasked256, - ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPADDDMasked256, - ssa.OpAMD64VPSUBQMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VPMAXUDMasked128, - ssa.OpAMD64VPXORQMasked512, - ssa.OpAMD64VMINPDMasked256, - ssa.OpAMD64VPADDWMasked256, - ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VPCMPUD256, - ssa.OpAMD64VPMULLQMasked128, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPCMPQ512, - ssa.OpAMD64VPCMPWMasked512, - ssa.OpAMD64VPXORDMasked256, - ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPMINSWMasked128, - ssa.OpAMD64VPXORDMasked512, - ssa.OpAMD64VPCMPUDMasked128, - ssa.OpAMD64VPCMPEQQMasked512, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VMAXPSMasked256, - ssa.OpAMD64VANDNPDMasked256, - ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VCMPPDMasked128, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VPCMPGTWMasked512, - ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPSUBBMasked512, - ssa.OpAMD64VPANDNDMasked128, - ssa.OpAMD64VPCMPQ128, - ssa.OpAMD64VPMINUDMasked128, - ssa.OpAMD64VPAVGWMasked128, - ssa.OpAMD64VPCMPUQMasked256, - ssa.OpAMD64VPMULLDMasked256, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VPCMPD512, - ssa.OpAMD64VDIVPSMasked256, - ssa.OpAMD64VPSUBDMasked256, - ssa.OpAMD64VPAVGWMasked512, - ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VMINPSMasked512, - ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VPCMPD256, - ssa.OpAMD64VPMINSDMasked256, - ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPSUBWMasked256, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPCMPB512, - ssa.OpAMD64VPANDDMasked512, - ssa.OpAMD64VPCMPWMasked256, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBQMasked128, - ssa.OpAMD64VPCMPW256, - ssa.OpAMD64VCMPPDMasked512, - ssa.OpAMD64VCMPPD512, - ssa.OpAMD64VPMINUBMasked512, - ssa.OpAMD64VPMULLDMasked512, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VPCMPUBMasked128, - ssa.OpAMD64VPORDMasked512, - ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXSDMasked128, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VPMINUDMasked256, - ssa.OpAMD64VPANDNDMasked512, - ssa.OpAMD64VPMINUDMasked512, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VMAXPSMasked512, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPMINSDMasked512, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VADDPSMasked128, - ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VDIVPDMasked256, - ssa.OpAMD64VPADDQMasked256, - ssa.OpAMD64VPMAXUDMasked256, - ssa.OpAMD64VPMAXUQMasked128, - ssa.OpAMD64VPCMPW128, - ssa.OpAMD64VPSUBDMasked128, - ssa.OpAMD64VPXORDMasked128, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VCMPPS512, - ssa.OpAMD64VADDPDMasked256, - ssa.OpAMD64VPCMPGTWMasked256, - ssa.OpAMD64VPADDQMasked128, - ssa.OpAMD64VPSUBSBMasked128, - ssa.OpAMD64VPADDSBMasked128, - ssa.OpAMD64VPCMPGTWMasked128, - ssa.OpAMD64VPSUBWMasked128, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VPMINUWMasked512, - ssa.OpAMD64VPMAXSDMasked256, - ssa.OpAMD64VPMINUWMasked256, - ssa.OpAMD64VPMULDQMasked256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPMINUQMasked128, - 
ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VPADDDMasked128, - ssa.OpAMD64VPCMPW512, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPADDDMasked512, - ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VPMULHUWMasked256, - ssa.OpAMD64VPORDMasked128, - ssa.OpAMD64VSCALEFPDMasked512, - ssa.OpAMD64VPMAXSBMasked512, - ssa.OpAMD64VPMINSQMasked256, - ssa.OpAMD64VPMINSBMasked512, - ssa.OpAMD64VADDPDMasked512, - ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VPMAXUQMasked256, - ssa.OpAMD64VDIVPSMasked128, - ssa.OpAMD64VPAVGBMasked128, - ssa.OpAMD64VMAXPDMasked128, - ssa.OpAMD64VADDPSMasked256, - ssa.OpAMD64VPCMPGTQMasked512, - ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VPANDQMasked128, - ssa.OpAMD64VPCMPEQWMasked512, - ssa.OpAMD64VADDPSMasked512, - ssa.OpAMD64VPMAXSQMasked128, - ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VPCMPEQQMasked128, - ssa.OpAMD64VMAXPDMasked256, - ssa.OpAMD64VPANDNQMasked256, - ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VMINPDMasked512, - ssa.OpAMD64VMAXPSMasked128, - ssa.OpAMD64VPMAXSBMasked256, - ssa.OpAMD64VPANDDMasked128, - ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VPANDNQMasked128, - ssa.OpAMD64VPCMPEQQMasked256, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPMINUQMasked256, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VPMAXSBMasked128, - ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VPMINSWMasked256, - ssa.OpAMD64VMINPSMasked256, - ssa.OpAMD64VPMAXUBMasked128, - ssa.OpAMD64VPCMPGTQMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VMINPDMasked128, - ssa.OpAMD64VMINPSMasked128, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPMULHWMasked256, - ssa.OpAMD64VPMAXUDMasked512, - ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VANDPSMasked128, - ssa.OpAMD64VPMAXSQMasked256, - ssa.OpAMD64VPAVGBMasked256, - ssa.OpAMD64VPCMPEQWMasked256, - ssa.OpAMD64VSCALEFPSMasked256: - if p.From.Type == obj.TYPE_CONST { - p.AddRestSourceReg(simdReg(v.Args[1])) - } else { - p.AddRestSourceReg(simdReg(v.Args[2])) - } - } - - // Fourth arg - switch v.Op { - case ssa.OpAMD64VPCMPUBMasked512, - ssa.OpAMD64VPCMPBMasked128, - ssa.OpAMD64VPCMPUWMasked128, - ssa.OpAMD64VPCMPBMasked512, - ssa.OpAMD64VPCMPUWMasked512, - ssa.OpAMD64VPCMPDMasked256, - ssa.OpAMD64VPCMPDMasked128, - ssa.OpAMD64VCMPPSMasked256, - ssa.OpAMD64VPCMPDMasked512, - ssa.OpAMD64VPCMPQMasked512, - ssa.OpAMD64VPCMPBMasked256, - ssa.OpAMD64VPCMPUWMasked256, - ssa.OpAMD64VPCMPUDMasked512, - ssa.OpAMD64VPCMPWMasked128, - ssa.OpAMD64VCMPPDMasked256, - ssa.OpAMD64VPCMPQMasked256, - ssa.OpAMD64VCMPPSMasked512, - ssa.OpAMD64VPCMPQMasked128, - ssa.OpAMD64VPCMPUBMasked256, - ssa.OpAMD64VPCMPUQMasked128, - ssa.OpAMD64VPCMPUQMasked512, - ssa.OpAMD64VPCMPUDMasked256, - ssa.OpAMD64VCMPPSMasked128, - ssa.OpAMD64VPCMPWMasked512, - ssa.OpAMD64VPCMPUDMasked128, - ssa.OpAMD64VCMPPDMasked128, - ssa.OpAMD64VPCMPUQMasked256, - ssa.OpAMD64VPCMPWMasked256, - ssa.OpAMD64VCMPPDMasked512, - ssa.OpAMD64VPCMPUBMasked128: - if p.From.Type == obj.TYPE_CONST { - p.AddRestSourceReg(simdReg(v.Args[2])) - } else { - p.AddRestSourceReg(simdReg(v.Args[3])) - } - } - - // Output - switch v.Op { - case ssa.OpAMD64VPMINSW256, - ssa.OpAMD64VPMULLD256, - ssa.OpAMD64VORPD512, - ssa.OpAMD64VPCMPUBMasked512, - ssa.OpAMD64VPCMPUQ256, - ssa.OpAMD64VPMINSDMasked128, - ssa.OpAMD64VDIVPD256, - ssa.OpAMD64VPCMPEQW256, - ssa.OpAMD64VPCMPBMasked128, - 
ssa.OpAMD64VPCMPUWMasked128, - ssa.OpAMD64VHADDPS128, - ssa.OpAMD64VPXOR128, - ssa.OpAMD64VPADDQ128, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VPANDQMasked256, - ssa.OpAMD64VPCMPBMasked512, - ssa.OpAMD64VSQRTPDMasked128, - ssa.OpAMD64VPMAXUB128, - ssa.OpAMD64VPSUBW256, - ssa.OpAMD64VPMAXSQ512, - ssa.OpAMD64VANDNPS512, - ssa.OpAMD64VPCMPUWMasked512, - ssa.OpAMD64VPMULHUWMasked512, - ssa.OpAMD64VPABSW512, - ssa.OpAMD64VPCMPD128, - ssa.OpAMD64VPCMPDMasked256, - ssa.OpAMD64VRSQRT14PD256, - ssa.OpAMD64VPHADDW128, - ssa.OpAMD64VPCMPGTQMasked256, - ssa.OpAMD64VPMULLQMasked256, - ssa.OpAMD64VCMPPD128, - ssa.OpAMD64VCMPPS256, - ssa.OpAMD64VPADDW512, - ssa.OpAMD64VPMULLDMasked128, - ssa.OpAMD64VPCMPEQQ128, - ssa.OpAMD64VPAVGW128, - ssa.OpAMD64VPOR256, - ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPCMPDMasked128, - ssa.OpAMD64VPCMPUW256, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMULLWMasked128, - ssa.OpAMD64VPOPCNTW512, - ssa.OpAMD64VORPD256, - ssa.OpAMD64VPANDN256, - ssa.OpAMD64VPANDD512, - ssa.OpAMD64VCMPPSMasked256, - ssa.OpAMD64VPCMPDMasked512, - ssa.OpAMD64VPORQ512, - ssa.OpAMD64VPCMPQ256, - ssa.OpAMD64VPMULLWMasked256, - ssa.OpAMD64VPSUBSW128, - ssa.OpAMD64VPCMPQMasked512, - ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VPCMPUD128, - ssa.OpAMD64VPCMPGTB256, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VPCMPUW128, - ssa.OpAMD64VPCMPEQD256, - ssa.OpAMD64VPSUBSW512, - ssa.OpAMD64VPABSD512, - ssa.OpAMD64VPADDD512, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPCMPBMasked256, - ssa.OpAMD64VPMULUDQ512, - ssa.OpAMD64VPADDSW512, - ssa.OpAMD64VPCMPB256, - ssa.OpAMD64VPCMPUWMasked256, - ssa.OpAMD64VPORQMasked128, - ssa.OpAMD64VANDPD128, - ssa.OpAMD64VPCMPEQD128, - ssa.OpAMD64VPHSUBSW128, - ssa.OpAMD64VPADDSW256, - ssa.OpAMD64VPCMPUDMasked512, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VPMULHUW128, - ssa.OpAMD64VPCMPGTW512, - ssa.OpAMD64VPCMPWMasked128, - ssa.OpAMD64VSQRTPDMasked256, - ssa.OpAMD64VPOPCNTB256, - ssa.OpAMD64VCMPPD256, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPMINSD512, - ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VPOPCNTB128, - ssa.OpAMD64VPOPCNTWMasked128, - ssa.OpAMD64VCMPPDMasked256, - ssa.OpAMD64VMAXPS128, - ssa.OpAMD64VPMULLD128, - ssa.OpAMD64VPSUBB256, - ssa.OpAMD64VMINPD128, - ssa.OpAMD64VPANDNQMasked512, - ssa.OpAMD64VANDPS512, - ssa.OpAMD64VPCMPQMasked256, - ssa.OpAMD64VPOPCNTDMasked256, - ssa.OpAMD64VANDNPD256, - ssa.OpAMD64VPAND128, - ssa.OpAMD64VPANDN128, - ssa.OpAMD64VXORPD256, - ssa.OpAMD64VMAXPDMasked512, - ssa.OpAMD64VPCMPUQ512, - ssa.OpAMD64VPCMPUW512, - ssa.OpAMD64VPMAXUWMasked256, - ssa.OpAMD64VCMPPSMasked512, - ssa.OpAMD64VPMINSQMasked128, - ssa.OpAMD64VPMULDQ256, - ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPXORQMasked128, - ssa.OpAMD64VPCMPEQW128, - ssa.OpAMD64VPABSWMasked128, - ssa.OpAMD64VPMAXSWMasked256, - ssa.OpAMD64VPMULDQ128, - ssa.OpAMD64VPMULLWMasked512, - ssa.OpAMD64VDIVPS512, - ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VRCP14PS512, - ssa.OpAMD64VPCMPQMasked128, - ssa.OpAMD64VPCMPUBMasked256, - ssa.OpAMD64VRSQRT14PDMasked128, - ssa.OpAMD64VPCMPUD512, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VPADDW256, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VPCMPUQ128, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VCMPPS128, - ssa.OpAMD64VPCMPEQWMasked128, - ssa.OpAMD64VHSUBPS128, - ssa.OpAMD64VPMAXSDMasked512, - ssa.OpAMD64VPABSQMasked256, - ssa.OpAMD64VADDPS256, - ssa.OpAMD64VHSUBPS256, - ssa.OpAMD64VPSUBB128, - ssa.OpAMD64VPCMPUB128, - ssa.OpAMD64VMAXPD512, - ssa.OpAMD64VPAVGBMasked512, - 
ssa.OpAMD64VPCMPUQMasked128, - ssa.OpAMD64VPHSUBW256, - ssa.OpAMD64VPANDNDMasked256, - ssa.OpAMD64VPMAXUWMasked128, - ssa.OpAMD64VXORPS128, - ssa.OpAMD64VMINPS128, - ssa.OpAMD64VADDPDMasked128, - ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VPORQMasked256, - ssa.OpAMD64VPMULHW128, - ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VPSUBDMasked512, - ssa.OpAMD64VPSUBQ512, - ssa.OpAMD64VPADDB128, - ssa.OpAMD64VPSUBSB256, - ssa.OpAMD64VDIVPSMasked512, - ssa.OpAMD64VPCMPUB512, - ssa.OpAMD64VSCALEFPS128, - ssa.OpAMD64VSQRTPS512, - ssa.OpAMD64VPSIGND128, - ssa.OpAMD64VRSQRT14PD512, - ssa.OpAMD64VDIVPDMasked128, - ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VPCMPUB256, - ssa.OpAMD64VPMINSWMasked512, - ssa.OpAMD64VPXORD512, - ssa.OpAMD64VHADDPD256, - ssa.OpAMD64VPMAXSB128, - ssa.OpAMD64VPHSUBD128, - ssa.OpAMD64VPCMPUQMasked512, - ssa.OpAMD64VPANDDMasked256, - ssa.OpAMD64VPCMPUDMasked256, - ssa.OpAMD64VRCP14PSMasked128, - ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VPSUBD512, - ssa.OpAMD64VANDNPD128, - ssa.OpAMD64VPHADDD256, - ssa.OpAMD64VMINPS512, - ssa.OpAMD64VPMULDQ512, - ssa.OpAMD64VCMPPSMasked128, - ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VPCMPB128, - ssa.OpAMD64VPORDMasked256, - ssa.OpAMD64VPABSW256, - ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPADDDMasked256, - ssa.OpAMD64VPSUBQMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VPCMPEQB128, - ssa.OpAMD64VPMULLW256, - ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VPMAXUDMasked128, - ssa.OpAMD64VPADDB256, - ssa.OpAMD64VPABSWMasked256, - ssa.OpAMD64VPOPCNTWMasked512, - ssa.OpAMD64VPSUBD128, - ssa.OpAMD64VPXORQMasked512, - ssa.OpAMD64VMINPDMasked256, - ssa.OpAMD64VPADDWMasked256, - ssa.OpAMD64VPMAXSQ256, - ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VMULPS512, - ssa.OpAMD64VPCMPUD256, - ssa.OpAMD64VPMULLQMasked128, - ssa.OpAMD64VPADDSB128, - ssa.OpAMD64VMINPD512, - ssa.OpAMD64VPMAXSD512, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPOPCNTQMasked128, - ssa.OpAMD64VPCMPQ512, - ssa.OpAMD64VPMINUB256, - ssa.OpAMD64VPCMPWMasked512, - ssa.OpAMD64VPXORDMasked256, - ssa.OpAMD64VPABSB256, - ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPOPCNTQ128, - ssa.OpAMD64VPMINSD256, - ssa.OpAMD64VPOPCNTDMasked512, - ssa.OpAMD64VPMINSWMasked128, - ssa.OpAMD64VPOPCNTD256, - ssa.OpAMD64VPXORDMasked512, - ssa.OpAMD64VPABSQ256, - ssa.OpAMD64VPOPCNTW256, - ssa.OpAMD64VDIVPS256, - ssa.OpAMD64VPHADDSW128, - ssa.OpAMD64VPSUBD256, - ssa.OpAMD64VRSQRT14PD128, - ssa.OpAMD64VDIVPD128, - ssa.OpAMD64VPCMPUDMasked128, - ssa.OpAMD64VPCMPEQQMasked512, - ssa.OpAMD64VRCP14PDMasked256, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VMAXPSMasked256, - ssa.OpAMD64VPMULLQ256, - ssa.OpAMD64VANDNPDMasked256, - ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VCMPPDMasked128, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VPCMPGTWMasked512, - ssa.OpAMD64VPOR128, - ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPSUBBMasked512, - ssa.OpAMD64VPANDNDMasked128, - ssa.OpAMD64VPCMPQ128, - ssa.OpAMD64VPMINUDMasked128, - ssa.OpAMD64VXORPD128, - ssa.OpAMD64VPAVGWMasked128, - ssa.OpAMD64VPCMPUQMasked256, - ssa.OpAMD64VPMULLDMasked256, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VRSQRT14PSMasked128, - ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VPMULLW128, - ssa.OpAMD64VPSUBW128, - ssa.OpAMD64VPXOR256, - ssa.OpAMD64VPCMPD512, - ssa.OpAMD64VDIVPSMasked256, - ssa.OpAMD64VPOPCNTBMasked256, - ssa.OpAMD64VPADDD128, - ssa.OpAMD64VRSQRTPS128, - ssa.OpAMD64VHADDPD128, - ssa.OpAMD64VPSUBDMasked256, - ssa.OpAMD64VPOPCNTD512, - ssa.OpAMD64VPANDNQ512, - ssa.OpAMD64VPAVGWMasked512, - 
ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VMINPSMasked512, - ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VSQRTPSMasked512, - ssa.OpAMD64VPCMPD256, - ssa.OpAMD64VPMINSDMasked256, - ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPSUBWMasked256, - ssa.OpAMD64VSQRTPD512, - ssa.OpAMD64VPADDSWMasked256, - ssa.OpAMD64VPCMPB512, - ssa.OpAMD64VPANDDMasked512, - ssa.OpAMD64VRCP14PD512, - ssa.OpAMD64VPABSWMasked512, - ssa.OpAMD64VPCMPWMasked256, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBQMasked128, - ssa.OpAMD64VPCMPW256, - ssa.OpAMD64VPABSD128, - ssa.OpAMD64VPMINUD256, - ssa.OpAMD64VCMPPDMasked512, - ssa.OpAMD64VCMPPD512, - ssa.OpAMD64VPMINUBMasked512, - ssa.OpAMD64VPSUBQ256, - ssa.OpAMD64VPMINSD128, - ssa.OpAMD64VPADDSB256, - ssa.OpAMD64VPMULLDMasked512, - ssa.OpAMD64VANDPD512, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VPHADDSW256, - ssa.OpAMD64VPAND256, - ssa.OpAMD64VADDPS512, - ssa.OpAMD64VPMINUQ256, - ssa.OpAMD64VADDPD256, - ssa.OpAMD64VPABSB128, - ssa.OpAMD64VPANDND512, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPMAXSW256, - ssa.OpAMD64VMAXPD256, - ssa.OpAMD64VMULPD128, - ssa.OpAMD64VPOPCNTQMasked256, - ssa.OpAMD64VPADDD256, - ssa.OpAMD64VPOPCNTQ512, - ssa.OpAMD64VMINPD256, - ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPOPCNTBMasked512, - ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VPCMPUBMasked128, - ssa.OpAMD64VPMINSW512, - ssa.OpAMD64VPORDMasked512, - ssa.OpAMD64VPAVGB128, - ssa.OpAMD64VADDPD512, - ssa.OpAMD64VPMULHW512, - ssa.OpAMD64VPADDQ256, - ssa.OpAMD64VPMINUQ512, - ssa.OpAMD64VORPS512, - ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXSDMasked128, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VSCALEFPD128, - ssa.OpAMD64VPCMPGTW256, - ssa.OpAMD64VPMAXUW256, - ssa.OpAMD64VPMAXUD512, - ssa.OpAMD64VPMAXUQ256, - ssa.OpAMD64VPMINUDMasked256, - ssa.OpAMD64VPABSBMasked512, - ssa.OpAMD64VSQRTPD128, - ssa.OpAMD64VPANDNDMasked512, - ssa.OpAMD64VPMINUDMasked512, - ssa.OpAMD64VPABSBMasked256, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VMAXPSMasked512, - ssa.OpAMD64VPMINUD512, - ssa.OpAMD64VPABSBMasked128, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPMINSDMasked512, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VADDPSMasked128, - ssa.OpAMD64VPMAXSB512, - ssa.OpAMD64VPABSB512, - ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VADDPS128, - ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VANDNPS256, - ssa.OpAMD64VPMAXSB256, - ssa.OpAMD64VDIVPDMasked256, - ssa.OpAMD64VPHSUBW128, - ssa.OpAMD64VPADDQMasked256, - ssa.OpAMD64VPMAXSD256, - ssa.OpAMD64VPABSDMasked512, - ssa.OpAMD64VPADDQ512, - ssa.OpAMD64VPMAXUDMasked256, - ssa.OpAMD64VPCMPGTB128, - ssa.OpAMD64VPMAXUQMasked128, - ssa.OpAMD64VPCMPW128, - ssa.OpAMD64VPCMPGTQ128, - ssa.OpAMD64VPANDQ512, - ssa.OpAMD64VRCP14PSMasked512, - ssa.OpAMD64VANDPS256, - ssa.OpAMD64VPHSUBD256, - ssa.OpAMD64VPSUBW512, - ssa.OpAMD64VHADDPS256, - ssa.OpAMD64VMULPD256, - ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VPSUBDMasked128, - ssa.OpAMD64VPXORDMasked128, - ssa.OpAMD64VHSUBPD128, - ssa.OpAMD64VPMAXUW128, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VPHADDD128, - ssa.OpAMD64VPMINUD128, - ssa.OpAMD64VPSIGND256, - ssa.OpAMD64VPADDSW128, - ssa.OpAMD64VCMPPS512, - ssa.OpAMD64VPMAXUQ512, - ssa.OpAMD64VPCMPGTQ512, - ssa.OpAMD64VADDPDMasked256, - ssa.OpAMD64VHSUBPD256, - ssa.OpAMD64VPCMPGTWMasked256, - ssa.OpAMD64VPOPCNTW128, - ssa.OpAMD64VPSUBSB512, - ssa.OpAMD64VRSQRT14PDMasked256, - ssa.OpAMD64VPMAXSD128, - ssa.OpAMD64VPADDQMasked128, - ssa.OpAMD64VPOPCNTQ256, - ssa.OpAMD64VPMAXSQ128, - ssa.OpAMD64VANDPD256, - ssa.OpAMD64VPSUBSBMasked128, - 
ssa.OpAMD64VSQRTPS128, - ssa.OpAMD64VPCMPGTQ256, - ssa.OpAMD64VPADDSBMasked128, - ssa.OpAMD64VXORPD512, - ssa.OpAMD64VPCMPGTWMasked128, - ssa.OpAMD64VPMULLQ128, - ssa.OpAMD64VPSUBWMasked128, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VPMAXUD256, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VPMINUWMasked512, - ssa.OpAMD64VRSQRTPS256, - ssa.OpAMD64VPXORQ512, - ssa.OpAMD64VPMAXSDMasked256, - ssa.OpAMD64VPMINUWMasked256, - ssa.OpAMD64VPMINSQ256, - ssa.OpAMD64VPMULDQMasked256, - ssa.OpAMD64VPMULUDQ128, - ssa.OpAMD64VPMAXUB256, - ssa.OpAMD64VPMINUW256, - ssa.OpAMD64VPADDSWMasked512, - ssa.OpAMD64VPADDSB512, + ssa.OpAMD64VPMINUWMasked256, + ssa.OpAMD64VPMINUWMasked512, + ssa.OpAMD64VPMINUWMasked128, + ssa.OpAMD64VPMINUDMasked512, + ssa.OpAMD64VPMINUDMasked128, + ssa.OpAMD64VPMINUDMasked256, ssa.OpAMD64VPMINUQMasked128, - ssa.OpAMD64VPMULUDQ256, - ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VPADDDMasked128, - ssa.OpAMD64VPCMPW512, - ssa.OpAMD64VPABSD256, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VPABSDMasked128, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPMULHUW512, - ssa.OpAMD64VPSUBQ128, - ssa.OpAMD64VPADDDMasked512, - ssa.OpAMD64VRCP14PS128, + ssa.OpAMD64VPMINUQMasked256, + ssa.OpAMD64VPMINUQMasked512, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VPMINUBMasked512, ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VPMULHUWMasked256, - ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VMULPDMasked512, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VSCALEFPDMasked256, ssa.OpAMD64VSCALEFPDMasked512, - ssa.OpAMD64VSCALEFPD512, - ssa.OpAMD64VPMAXSBMasked512, - ssa.OpAMD64VPMINSQMasked256, - ssa.OpAMD64VXORPS512, - ssa.OpAMD64VPSUBSB128, - ssa.OpAMD64VPMAXSW128, - ssa.OpAMD64VPMINSBMasked512, - ssa.OpAMD64VADDPDMasked512, - ssa.OpAMD64VPSUBB512, + ssa.OpAMD64VPMULDQMasked128, + ssa.OpAMD64VPMULDQMasked256, + ssa.OpAMD64VPMULDQMasked512, + ssa.OpAMD64VPMULUDQMasked128, + ssa.OpAMD64VPMULUDQMasked256, + ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VPMULHWMasked256, ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VRSQRT14PSMasked256, - ssa.OpAMD64VPOPCNTBMasked128, - ssa.OpAMD64VPOPCNTD128, - ssa.OpAMD64VPMAXUQMasked256, - ssa.OpAMD64VPMINSB256, - ssa.OpAMD64VPABSQMasked512, - ssa.OpAMD64VDIVPSMasked128, - ssa.OpAMD64VPAVGBMasked128, - ssa.OpAMD64VPCMPEQW512, - ssa.OpAMD64VPMULHUW256, - ssa.OpAMD64VPMULLW512, - ssa.OpAMD64VPADDB512, - ssa.OpAMD64VPOPCNTB512, - ssa.OpAMD64VPORD512, - ssa.OpAMD64VMAXPDMasked128, - ssa.OpAMD64VPMAXSW512, - ssa.OpAMD64VPMINUW512, - ssa.OpAMD64VADDPSMasked256, - ssa.OpAMD64VPCMPGTQMasked512, - ssa.OpAMD64VRCP14PD128, - ssa.OpAMD64VPHSUBSW256, - ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMULLWMasked256, + ssa.OpAMD64VPMULLWMasked512, + ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VPMULLDMasked512, + ssa.OpAMD64VPMULLDMasked128, + ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPMULLQMasked256, + ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VORPSMasked512, ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VMAXPS256, - ssa.OpAMD64VPANDQMasked128, - ssa.OpAMD64VPCMPEQWMasked512, - ssa.OpAMD64VRCP14PDMasked128, - ssa.OpAMD64VADDPSMasked512, - ssa.OpAMD64VPMAXSQMasked128, - 
ssa.OpAMD64VPOPCNTDMasked128, - ssa.OpAMD64VPHADDW256, + ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VORPDMasked128, + ssa.OpAMD64VORPDMasked256, + ssa.OpAMD64VORPDMasked512, + ssa.OpAMD64VPORDMasked512, + ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VPORQMasked256, + ssa.OpAMD64VPORQMasked512, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VPSUBWMasked256, + ssa.OpAMD64VPSUBWMasked512, + ssa.OpAMD64VPSUBWMasked128, + ssa.OpAMD64VPSUBDMasked512, + ssa.OpAMD64VPSUBDMasked128, + ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPSUBQMasked128, + ssa.OpAMD64VPSUBQMasked256, + ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VXORPSMasked512, + ssa.OpAMD64VXORPSMasked128, ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VRCP14PS256, - ssa.OpAMD64VPSIGNB256, - ssa.OpAMD64VPSUBSW256, - ssa.OpAMD64VDIVPD512, - ssa.OpAMD64VPADDW128, - ssa.OpAMD64VXORPS256, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VPAVGB512, - ssa.OpAMD64VPMAXUW512, - ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VPSIGNW256, - ssa.OpAMD64VSQRTPSMasked128, + ssa.OpAMD64VXORPDMasked128, + ssa.OpAMD64VXORPDMasked256, + ssa.OpAMD64VXORPDMasked512, + ssa.OpAMD64VPXORDMasked512, + ssa.OpAMD64VPXORDMasked128, + ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPXORQMasked128, + ssa.OpAMD64VPXORQMasked256, + ssa.OpAMD64VPXORQMasked512: + p = simdFp2k1fp1(s, v) + + case ssa.OpAMD64VPCMPEQWMasked256, + ssa.OpAMD64VPCMPEQWMasked512, + ssa.OpAMD64VPCMPEQWMasked128, + ssa.OpAMD64VPCMPEQDMasked512, + ssa.OpAMD64VPCMPEQDMasked128, + ssa.OpAMD64VPCMPEQDMasked256, ssa.OpAMD64VPCMPEQQMasked128, - ssa.OpAMD64VPOPCNTWMasked256, - ssa.OpAMD64VPCMPGTD128, - ssa.OpAMD64VMAXPDMasked256, - ssa.OpAMD64VPANDNQMasked256, - ssa.OpAMD64VPMINSB512, - ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VPMINUW128, - ssa.OpAMD64VMINPDMasked512, - ssa.OpAMD64VMAXPSMasked128, - ssa.OpAMD64VPMAXSBMasked256, - ssa.OpAMD64VPANDDMasked128, - ssa.OpAMD64VSQRTPD256, - ssa.OpAMD64VPCMPGTD256, - ssa.OpAMD64VPOPCNTQMasked512, - ssa.OpAMD64VPMAXUB512, - ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VPANDNQMasked128, - ssa.OpAMD64VSCALEFPS256, ssa.OpAMD64VPCMPEQQMasked256, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VORPS128, - ssa.OpAMD64VPMINUB128, - ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPAVGB256, - ssa.OpAMD64VPMINSQ128, - ssa.OpAMD64VPCMPEQQ256, - ssa.OpAMD64VMULPD512, - ssa.OpAMD64VPABSQ512, - ssa.OpAMD64VPABSDMasked256, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPSIGNW128, - ssa.OpAMD64VPABSQ128, - ssa.OpAMD64VPMINUQMasked256, - ssa.OpAMD64VPMULHW256, - ssa.OpAMD64VSCALEFPS512, - ssa.OpAMD64VRSQRT14PDMasked512, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VADDPD128, - ssa.OpAMD64VSCALEFPD256, - ssa.OpAMD64VANDPS128, - ssa.OpAMD64VMULPS256, - ssa.OpAMD64VPMINSW128, - ssa.OpAMD64VPMAXSBMasked128, - ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VMINPS256, - ssa.OpAMD64VPMAXUQ128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VPAVGW256, - ssa.OpAMD64VMAXPD128, - ssa.OpAMD64VPSIGNB128, - ssa.OpAMD64VPMINUB512, - ssa.OpAMD64VPABSW128, - ssa.OpAMD64VPCMPGTW128, - ssa.OpAMD64VORPS256, - 
ssa.OpAMD64VPMINSB128, - ssa.OpAMD64VPMINUQ128, - ssa.OpAMD64VPMINSQ512, - ssa.OpAMD64VSQRTPDMasked512, - ssa.OpAMD64VPMINSWMasked256, - ssa.OpAMD64VMINPSMasked256, - ssa.OpAMD64VPMAXUBMasked128, - ssa.OpAMD64VPCMPEQB256, - ssa.OpAMD64VANDNPD512, + ssa.OpAMD64VPCMPEQQMasked512, + ssa.OpAMD64VPCMPEQBMasked128, + ssa.OpAMD64VPCMPEQBMasked256, + ssa.OpAMD64VPCMPEQBMasked512, + ssa.OpAMD64VPCMPGTWMasked256, + ssa.OpAMD64VPCMPGTWMasked512, + ssa.OpAMD64VPCMPGTWMasked128, + ssa.OpAMD64VPCMPGTDMasked512, + ssa.OpAMD64VPCMPGTDMasked128, + ssa.OpAMD64VPCMPGTDMasked256, ssa.OpAMD64VPCMPGTQMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VORPD128, - ssa.OpAMD64VMAXPS512, - ssa.OpAMD64VPMULLD512, - ssa.OpAMD64VMINPDMasked128, - ssa.OpAMD64VANDNPS128, - ssa.OpAMD64VMULPS128, - ssa.OpAMD64VPMULLQ512, - ssa.OpAMD64VRSQRT14PS512, - ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VPCMPGTQMasked256, + ssa.OpAMD64VPCMPGTQMasked512, + ssa.OpAMD64VPCMPGTBMasked128, + ssa.OpAMD64VPCMPGTBMasked256, + ssa.OpAMD64VPCMPGTBMasked512: + p = simdFp2k1k1(s, v) + + case ssa.OpAMD64VPABSWMasked256, + ssa.OpAMD64VPABSWMasked512, + ssa.OpAMD64VPABSWMasked128, + ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPABSDMasked128, + ssa.OpAMD64VPABSDMasked256, + ssa.OpAMD64VPABSQMasked128, + ssa.OpAMD64VPABSQMasked256, + ssa.OpAMD64VPABSQMasked512, + ssa.OpAMD64VPABSBMasked128, + ssa.OpAMD64VPABSBMasked256, + ssa.OpAMD64VPABSBMasked512, + ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VRCP14PSMasked128, ssa.OpAMD64VRCP14PSMasked256, - ssa.OpAMD64VPMAXUD128, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPCMPEQQ512, - ssa.OpAMD64VSQRTPS256, - ssa.OpAMD64VPMULHWMasked256, + ssa.OpAMD64VRCP14PDMasked128, + ssa.OpAMD64VRCP14PDMasked256, + ssa.OpAMD64VRCP14PDMasked512, + ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VRSQRT14PDMasked128, + ssa.OpAMD64VRSQRT14PDMasked256, + ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VPOPCNTWMasked256, + ssa.OpAMD64VPOPCNTWMasked512, + ssa.OpAMD64VPOPCNTWMasked128, + ssa.OpAMD64VPOPCNTDMasked512, + ssa.OpAMD64VPOPCNTDMasked128, + ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VPOPCNTQMasked128, + ssa.OpAMD64VPOPCNTQMasked256, + ssa.OpAMD64VPOPCNTQMasked512, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VSQRTPSMasked512, + ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, - ssa.OpAMD64VDIVPS128, - ssa.OpAMD64VRCP14PD256, - ssa.OpAMD64VPMAXUDMasked512, - ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VANDPSMasked128, - ssa.OpAMD64VPABSQMasked128, - ssa.OpAMD64VPMAXSQMasked256, - ssa.OpAMD64VPAVGBMasked256, - ssa.OpAMD64VPCMPEQWMasked256, - ssa.OpAMD64VSCALEFPSMasked256, - ssa.OpAMD64VPAVGW512: - p.To.Type = obj.TYPE_REG - p.To.Reg = simdReg(v) + ssa.OpAMD64VSQRTPDMasked128, + ssa.OpAMD64VSQRTPDMasked256, + ssa.OpAMD64VSQRTPDMasked512: + p = simdFp1k1fp1(s, v) + + case ssa.OpAMD64VCMPPS128, + ssa.OpAMD64VCMPPS256, + ssa.OpAMD64VCMPPD128, + ssa.OpAMD64VCMPPD256: + p = simdFp21Imm8(s, v) + + case ssa.OpAMD64VCMPPS512, + ssa.OpAMD64VCMPPD512, + ssa.OpAMD64VPCMPUW256, + ssa.OpAMD64VPCMPUW512, + ssa.OpAMD64VPCMPUW128, + ssa.OpAMD64VPCMPUD512, + ssa.OpAMD64VPCMPUD128, + ssa.OpAMD64VPCMPUD256, + ssa.OpAMD64VPCMPUQ128, + ssa.OpAMD64VPCMPUQ256, + ssa.OpAMD64VPCMPUQ512, + ssa.OpAMD64VPCMPUB128, + ssa.OpAMD64VPCMPUB256, + ssa.OpAMD64VPCMPUB512, + ssa.OpAMD64VPCMPW256, + ssa.OpAMD64VPCMPW512, + ssa.OpAMD64VPCMPW128, + ssa.OpAMD64VPCMPD512, + ssa.OpAMD64VPCMPD128, + ssa.OpAMD64VPCMPD256, + 
ssa.OpAMD64VPCMPQ128, + ssa.OpAMD64VPCMPQ256, + ssa.OpAMD64VPCMPQ512, + ssa.OpAMD64VPCMPB128, + ssa.OpAMD64VPCMPB256, + ssa.OpAMD64VPCMPB512: + p = simdFp2k1Imm8(s, v) + + case ssa.OpAMD64VCMPPSMasked512, + ssa.OpAMD64VCMPPSMasked128, + ssa.OpAMD64VCMPPSMasked256, + ssa.OpAMD64VCMPPDMasked128, + ssa.OpAMD64VCMPPDMasked256, + ssa.OpAMD64VCMPPDMasked512, + ssa.OpAMD64VPCMPUWMasked256, + ssa.OpAMD64VPCMPUWMasked512, + ssa.OpAMD64VPCMPUWMasked128, + ssa.OpAMD64VPCMPUDMasked512, + ssa.OpAMD64VPCMPUDMasked128, + ssa.OpAMD64VPCMPUDMasked256, + ssa.OpAMD64VPCMPUQMasked128, + ssa.OpAMD64VPCMPUQMasked256, + ssa.OpAMD64VPCMPUQMasked512, + ssa.OpAMD64VPCMPUBMasked128, + ssa.OpAMD64VPCMPUBMasked256, + ssa.OpAMD64VPCMPUBMasked512, + ssa.OpAMD64VPCMPWMasked256, + ssa.OpAMD64VPCMPWMasked512, + ssa.OpAMD64VPCMPWMasked128, + ssa.OpAMD64VPCMPDMasked512, + ssa.OpAMD64VPCMPDMasked128, + ssa.OpAMD64VPCMPDMasked256, + ssa.OpAMD64VPCMPQMasked128, + ssa.OpAMD64VPCMPQMasked256, + ssa.OpAMD64VPCMPQMasked512, + ssa.OpAMD64VPCMPBMasked128, + ssa.OpAMD64VPCMPBMasked256, + ssa.OpAMD64VPCMPBMasked512: + p = simdFp2k1k1Imm8(s, v) default: - // One result is required. + // Unknown reg shape return false } // Masked operation are always compiled with zeroing. switch v.Op { - case ssa.OpAMD64VPMINSDMasked128, - ssa.OpAMD64VMULPSMasked128, - ssa.OpAMD64VPANDQMasked256, - ssa.OpAMD64VSQRTPDMasked128, - ssa.OpAMD64VPMULHUWMasked512, - ssa.OpAMD64VPMULLQMasked256, - ssa.OpAMD64VPMULLDMasked128, - ssa.OpAMD64VPADDQMasked512, - ssa.OpAMD64VPMINUBMasked128, - ssa.OpAMD64VPMULLWMasked128, - ssa.OpAMD64VPMULLWMasked256, - ssa.OpAMD64VPADDSBMasked256, - ssa.OpAMD64VMULPSMasked256, - ssa.OpAMD64VPADDBMasked512, - ssa.OpAMD64VPADDSBMasked512, - ssa.OpAMD64VPORQMasked128, - ssa.OpAMD64VPSUBSBMasked512, - ssa.OpAMD64VSQRTPDMasked256, - ssa.OpAMD64VPAVGWMasked256, - ssa.OpAMD64VPMULUDQMasked256, - ssa.OpAMD64VPADDWMasked512, - ssa.OpAMD64VPOPCNTWMasked128, - ssa.OpAMD64VPANDNQMasked512, - ssa.OpAMD64VPOPCNTDMasked256, - ssa.OpAMD64VMAXPDMasked512, - ssa.OpAMD64VPMAXUWMasked256, - ssa.OpAMD64VPMINSQMasked128, - ssa.OpAMD64VPSUBSWMasked512, - ssa.OpAMD64VPXORQMasked128, + case ssa.OpAMD64VPABSWMasked256, + ssa.OpAMD64VPABSWMasked512, ssa.OpAMD64VPABSWMasked128, - ssa.OpAMD64VPMAXSWMasked256, - ssa.OpAMD64VPMULLWMasked512, - ssa.OpAMD64VPMULDQMasked512, - ssa.OpAMD64VRSQRT14PDMasked128, - ssa.OpAMD64VXORPSMasked128, - ssa.OpAMD64VANDNPSMasked256, - ssa.OpAMD64VPMULUDQMasked128, - ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPABSDMasked512, + ssa.OpAMD64VPABSDMasked128, + ssa.OpAMD64VPABSDMasked256, + ssa.OpAMD64VPABSQMasked128, ssa.OpAMD64VPABSQMasked256, - ssa.OpAMD64VPAVGBMasked512, - ssa.OpAMD64VPANDNDMasked256, - ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VPABSQMasked512, + ssa.OpAMD64VPABSBMasked128, + ssa.OpAMD64VPABSBMasked256, + ssa.OpAMD64VPABSBMasked512, + ssa.OpAMD64VADDPSMasked512, + ssa.OpAMD64VADDPSMasked128, + ssa.OpAMD64VADDPSMasked256, ssa.OpAMD64VADDPDMasked128, - ssa.OpAMD64VPMULLQMasked512, - ssa.OpAMD64VPORQMasked256, - ssa.OpAMD64VSCALEFPDMasked256, - ssa.OpAMD64VPSUBDMasked512, - ssa.OpAMD64VDIVPSMasked512, - ssa.OpAMD64VDIVPDMasked128, - ssa.OpAMD64VXORPDMasked128, - ssa.OpAMD64VPMINSWMasked512, - ssa.OpAMD64VPANDDMasked256, - ssa.OpAMD64VRCP14PSMasked128, - ssa.OpAMD64VMULPDMasked512, - ssa.OpAMD64VORPSMasked512, - ssa.OpAMD64VPORDMasked256, - ssa.OpAMD64VPMAXSQMasked512, - ssa.OpAMD64VPADDDMasked256, - ssa.OpAMD64VPSUBQMasked256, - ssa.OpAMD64VXORPSMasked512, - ssa.OpAMD64VDIVPDMasked512, - ssa.OpAMD64VPMAXUDMasked128, 
- ssa.OpAMD64VPABSWMasked256, - ssa.OpAMD64VPOPCNTWMasked512, - ssa.OpAMD64VPXORQMasked512, - ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VADDPDMasked512, ssa.OpAMD64VPADDWMasked256, - ssa.OpAMD64VPSUBWMasked512, - ssa.OpAMD64VPMULLQMasked128, - ssa.OpAMD64VPMINUWMasked128, - ssa.OpAMD64VPOPCNTQMasked128, - ssa.OpAMD64VPXORDMasked256, - ssa.OpAMD64VPORQMasked512, - ssa.OpAMD64VORPDMasked512, - ssa.OpAMD64VPOPCNTDMasked512, - ssa.OpAMD64VPMINSWMasked128, - ssa.OpAMD64VPXORDMasked512, - ssa.OpAMD64VRCP14PDMasked256, - ssa.OpAMD64VPMULUDQMasked512, - ssa.OpAMD64VMAXPSMasked256, - ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VPADDWMasked512, ssa.OpAMD64VPADDWMasked128, - ssa.OpAMD64VMULPDMasked128, - ssa.OpAMD64VANDNPDMasked512, - ssa.OpAMD64VPSUBBMasked512, - ssa.OpAMD64VPANDNDMasked128, - ssa.OpAMD64VPMINUDMasked128, - ssa.OpAMD64VPAVGWMasked128, - ssa.OpAMD64VPMULLDMasked256, - ssa.OpAMD64VPSUBBMasked256, - ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VPADDDMasked128, + ssa.OpAMD64VPADDDMasked256, + ssa.OpAMD64VPADDQMasked128, + ssa.OpAMD64VPADDQMasked256, + ssa.OpAMD64VPADDQMasked512, ssa.OpAMD64VPADDBMasked128, - ssa.OpAMD64VPMAXUBMasked512, - ssa.OpAMD64VDIVPSMasked256, - ssa.OpAMD64VPOPCNTBMasked256, - ssa.OpAMD64VPSUBDMasked256, - ssa.OpAMD64VPAVGWMasked512, - ssa.OpAMD64VSCALEFPSMasked128, - ssa.OpAMD64VMINPSMasked512, - ssa.OpAMD64VPSUBQMasked512, - ssa.OpAMD64VSQRTPSMasked512, - ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VPADDBMasked256, + ssa.OpAMD64VPADDBMasked512, ssa.OpAMD64VANDPSMasked512, - ssa.OpAMD64VPMAXUBMasked256, - ssa.OpAMD64VPSUBWMasked256, - ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VANDPSMasked128, + ssa.OpAMD64VANDPSMasked256, + ssa.OpAMD64VANDPDMasked128, + ssa.OpAMD64VANDPDMasked256, + ssa.OpAMD64VANDPDMasked512, ssa.OpAMD64VPANDDMasked512, - ssa.OpAMD64VPABSWMasked512, - ssa.OpAMD64VPSUBSWMasked256, - ssa.OpAMD64VPSUBQMasked128, - ssa.OpAMD64VPMINUBMasked512, - ssa.OpAMD64VPMULLDMasked512, - ssa.OpAMD64VMULPDMasked256, - ssa.OpAMD64VPSUBSWMasked128, - ssa.OpAMD64VPOPCNTQMasked256, - ssa.OpAMD64VPXORQMasked256, - ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VPANDDMasked256, + ssa.OpAMD64VPANDQMasked128, + ssa.OpAMD64VPANDQMasked256, ssa.OpAMD64VPANDQMasked512, - ssa.OpAMD64VPORDMasked512, - ssa.OpAMD64VPMAXUWMasked512, - ssa.OpAMD64VPMAXSDMasked128, - ssa.OpAMD64VPMULDQMasked128, - ssa.OpAMD64VPMINUDMasked256, - ssa.OpAMD64VPABSBMasked512, - ssa.OpAMD64VPANDNDMasked512, - ssa.OpAMD64VPMINUDMasked512, - ssa.OpAMD64VPABSBMasked256, - ssa.OpAMD64VXORPDMasked256, - ssa.OpAMD64VMAXPSMasked512, - ssa.OpAMD64VPABSBMasked128, - ssa.OpAMD64VPSUBBMasked128, - ssa.OpAMD64VPMINSDMasked512, - ssa.OpAMD64VPSUBSBMasked256, - ssa.OpAMD64VORPSMasked256, - ssa.OpAMD64VADDPSMasked128, - ssa.OpAMD64VXORPDMasked512, - ssa.OpAMD64VPMINUQMasked512, - ssa.OpAMD64VDIVPDMasked256, - ssa.OpAMD64VPADDQMasked256, - ssa.OpAMD64VPABSDMasked512, - ssa.OpAMD64VPMAXUDMasked256, - ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VANDNPSMasked512, + ssa.OpAMD64VANDNPSMasked128, + ssa.OpAMD64VANDNPSMasked256, + ssa.OpAMD64VANDNPDMasked128, + ssa.OpAMD64VANDNPDMasked256, + ssa.OpAMD64VANDNPDMasked512, + ssa.OpAMD64VPANDNDMasked512, + ssa.OpAMD64VPANDNDMasked128, + ssa.OpAMD64VPANDNDMasked256, + ssa.OpAMD64VPANDNQMasked128, + ssa.OpAMD64VPANDNQMasked256, + ssa.OpAMD64VPANDNQMasked512, ssa.OpAMD64VRCP14PSMasked512, + ssa.OpAMD64VRCP14PSMasked128, + ssa.OpAMD64VRCP14PSMasked256, + ssa.OpAMD64VRCP14PDMasked128, + 
ssa.OpAMD64VRCP14PDMasked256, ssa.OpAMD64VRCP14PDMasked512, - ssa.OpAMD64VPSUBDMasked128, - ssa.OpAMD64VPXORDMasked128, - ssa.OpAMD64VORPDMasked128, - ssa.OpAMD64VADDPDMasked256, + ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VRSQRT14PSMasked128, + ssa.OpAMD64VRSQRT14PSMasked256, + ssa.OpAMD64VRSQRT14PDMasked128, ssa.OpAMD64VRSQRT14PDMasked256, - ssa.OpAMD64VPADDQMasked128, - ssa.OpAMD64VPSUBSBMasked128, - ssa.OpAMD64VPADDSBMasked128, - ssa.OpAMD64VPSUBWMasked128, - ssa.OpAMD64VSCALEFPDMasked128, - ssa.OpAMD64VANDNPDMasked128, - ssa.OpAMD64VPMINUWMasked512, + ssa.OpAMD64VRSQRT14PDMasked512, + ssa.OpAMD64VPAVGWMasked256, + ssa.OpAMD64VPAVGWMasked512, + ssa.OpAMD64VPAVGWMasked128, + ssa.OpAMD64VPAVGBMasked128, + ssa.OpAMD64VPAVGBMasked256, + ssa.OpAMD64VPAVGBMasked512, + ssa.OpAMD64VDIVPSMasked512, + ssa.OpAMD64VDIVPSMasked128, + ssa.OpAMD64VDIVPSMasked256, + ssa.OpAMD64VDIVPDMasked128, + ssa.OpAMD64VDIVPDMasked256, + ssa.OpAMD64VDIVPDMasked512, + ssa.OpAMD64VMAXPSMasked512, + ssa.OpAMD64VMAXPSMasked128, + ssa.OpAMD64VMAXPSMasked256, + ssa.OpAMD64VMAXPDMasked128, + ssa.OpAMD64VMAXPDMasked256, + ssa.OpAMD64VMAXPDMasked512, + ssa.OpAMD64VPMAXSWMasked256, + ssa.OpAMD64VPMAXSWMasked512, + ssa.OpAMD64VPMAXSWMasked128, + ssa.OpAMD64VPMAXSDMasked512, + ssa.OpAMD64VPMAXSDMasked128, ssa.OpAMD64VPMAXSDMasked256, + ssa.OpAMD64VPMAXSQMasked128, + ssa.OpAMD64VPMAXSQMasked256, + ssa.OpAMD64VPMAXSQMasked512, + ssa.OpAMD64VPMAXSBMasked128, + ssa.OpAMD64VPMAXSBMasked256, + ssa.OpAMD64VPMAXSBMasked512, + ssa.OpAMD64VPMAXUWMasked256, + ssa.OpAMD64VPMAXUWMasked512, + ssa.OpAMD64VPMAXUWMasked128, + ssa.OpAMD64VPMAXUDMasked512, + ssa.OpAMD64VPMAXUDMasked128, + ssa.OpAMD64VPMAXUDMasked256, + ssa.OpAMD64VPMAXUQMasked128, + ssa.OpAMD64VPMAXUQMasked256, + ssa.OpAMD64VPMAXUQMasked512, + ssa.OpAMD64VPMAXUBMasked128, + ssa.OpAMD64VPMAXUBMasked256, + ssa.OpAMD64VPMAXUBMasked512, + ssa.OpAMD64VMINPSMasked512, + ssa.OpAMD64VMINPSMasked128, + ssa.OpAMD64VMINPSMasked256, + ssa.OpAMD64VMINPDMasked128, + ssa.OpAMD64VMINPDMasked256, + ssa.OpAMD64VMINPDMasked512, + ssa.OpAMD64VPMINSWMasked256, + ssa.OpAMD64VPMINSWMasked512, + ssa.OpAMD64VPMINSWMasked128, + ssa.OpAMD64VPMINSDMasked512, + ssa.OpAMD64VPMINSDMasked128, + ssa.OpAMD64VPMINSDMasked256, + ssa.OpAMD64VPMINSQMasked128, + ssa.OpAMD64VPMINSQMasked256, + ssa.OpAMD64VPMINSQMasked512, + ssa.OpAMD64VPMINSBMasked128, + ssa.OpAMD64VPMINSBMasked256, + ssa.OpAMD64VPMINSBMasked512, ssa.OpAMD64VPMINUWMasked256, - ssa.OpAMD64VPMULDQMasked256, - ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPMINUWMasked512, + ssa.OpAMD64VPMINUWMasked128, + ssa.OpAMD64VPMINUDMasked512, + ssa.OpAMD64VPMINUDMasked128, + ssa.OpAMD64VPMINUDMasked256, ssa.OpAMD64VPMINUQMasked128, - ssa.OpAMD64VANDPSMasked256, - ssa.OpAMD64VPADDDMasked128, - ssa.OpAMD64VANDNPSMasked128, - ssa.OpAMD64VPABSDMasked128, - ssa.OpAMD64VPADDSWMasked128, - ssa.OpAMD64VPADDDMasked512, + ssa.OpAMD64VPMINUQMasked256, + ssa.OpAMD64VPMINUQMasked512, + ssa.OpAMD64VPMINUBMasked128, + ssa.OpAMD64VPMINUBMasked256, + ssa.OpAMD64VPMINUBMasked512, ssa.OpAMD64VMULPSMasked512, - ssa.OpAMD64VORPDMasked256, - ssa.OpAMD64VPMULHUWMasked256, - ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VMULPSMasked128, + ssa.OpAMD64VMULPSMasked256, + ssa.OpAMD64VMULPDMasked128, + ssa.OpAMD64VMULPDMasked256, + ssa.OpAMD64VMULPDMasked512, + ssa.OpAMD64VSCALEFPSMasked512, + ssa.OpAMD64VSCALEFPSMasked128, + ssa.OpAMD64VSCALEFPSMasked256, + ssa.OpAMD64VSCALEFPDMasked128, + ssa.OpAMD64VSCALEFPDMasked256, ssa.OpAMD64VSCALEFPDMasked512, - ssa.OpAMD64VPMAXSBMasked512, - 
ssa.OpAMD64VPMINSQMasked256, - ssa.OpAMD64VPMINSBMasked512, - ssa.OpAMD64VADDPDMasked512, + ssa.OpAMD64VPMULDQMasked128, + ssa.OpAMD64VPMULDQMasked256, + ssa.OpAMD64VPMULDQMasked512, + ssa.OpAMD64VPMULUDQMasked128, + ssa.OpAMD64VPMULUDQMasked256, + ssa.OpAMD64VPMULUDQMasked512, + ssa.OpAMD64VPMULHWMasked256, ssa.OpAMD64VPMULHWMasked512, - ssa.OpAMD64VRSQRT14PSMasked256, - ssa.OpAMD64VPOPCNTBMasked128, - ssa.OpAMD64VPMAXUQMasked256, - ssa.OpAMD64VPABSQMasked512, - ssa.OpAMD64VDIVPSMasked128, - ssa.OpAMD64VPAVGBMasked128, - ssa.OpAMD64VMAXPDMasked128, - ssa.OpAMD64VADDPSMasked256, - ssa.OpAMD64VRSQRT14PSMasked512, + ssa.OpAMD64VPMULHWMasked128, + ssa.OpAMD64VPMULHUWMasked256, + ssa.OpAMD64VPMULHUWMasked512, + ssa.OpAMD64VPMULHUWMasked128, + ssa.OpAMD64VPMULLWMasked256, + ssa.OpAMD64VPMULLWMasked512, + ssa.OpAMD64VPMULLWMasked128, + ssa.OpAMD64VPMULLDMasked512, + ssa.OpAMD64VPMULLDMasked128, + ssa.OpAMD64VPMULLDMasked256, + ssa.OpAMD64VPMULLQMasked128, + ssa.OpAMD64VPMULLQMasked256, + ssa.OpAMD64VPMULLQMasked512, + ssa.OpAMD64VORPSMasked512, ssa.OpAMD64VORPSMasked128, - ssa.OpAMD64VPANDQMasked128, - ssa.OpAMD64VRCP14PDMasked128, - ssa.OpAMD64VADDPSMasked512, - ssa.OpAMD64VPMAXSQMasked128, - ssa.OpAMD64VPOPCNTDMasked128, - ssa.OpAMD64VXORPSMasked256, - ssa.OpAMD64VPMINSQMasked512, - ssa.OpAMD64VANDNPSMasked512, - ssa.OpAMD64VPMAXSWMasked512, - ssa.OpAMD64VSQRTPSMasked128, + ssa.OpAMD64VORPSMasked256, + ssa.OpAMD64VORPDMasked128, + ssa.OpAMD64VORPDMasked256, + ssa.OpAMD64VORPDMasked512, + ssa.OpAMD64VPORDMasked512, + ssa.OpAMD64VPORDMasked128, + ssa.OpAMD64VPORDMasked256, + ssa.OpAMD64VPORQMasked128, + ssa.OpAMD64VPORQMasked256, + ssa.OpAMD64VPORQMasked512, ssa.OpAMD64VPOPCNTWMasked256, - ssa.OpAMD64VMAXPDMasked256, - ssa.OpAMD64VPANDNQMasked256, - ssa.OpAMD64VPMULHUWMasked128, - ssa.OpAMD64VMINPDMasked512, - ssa.OpAMD64VMAXPSMasked128, - ssa.OpAMD64VPMAXSBMasked256, - ssa.OpAMD64VPANDDMasked128, + ssa.OpAMD64VPOPCNTWMasked512, + ssa.OpAMD64VPOPCNTWMasked128, + ssa.OpAMD64VPOPCNTDMasked512, + ssa.OpAMD64VPOPCNTDMasked128, + ssa.OpAMD64VPOPCNTDMasked256, + ssa.OpAMD64VPOPCNTQMasked128, + ssa.OpAMD64VPOPCNTQMasked256, ssa.OpAMD64VPOPCNTQMasked512, - ssa.OpAMD64VANDPDMasked128, - ssa.OpAMD64VPANDNQMasked128, - ssa.OpAMD64VSCALEFPSMasked512, - ssa.OpAMD64VANDPDMasked512, - ssa.OpAMD64VPMULHWMasked128, - ssa.OpAMD64VPABSDMasked256, - ssa.OpAMD64VPADDBMasked256, - ssa.OpAMD64VPMINUQMasked256, - ssa.OpAMD64VRSQRT14PDMasked512, - ssa.OpAMD64VPMINUBMasked256, - ssa.OpAMD64VPMAXSBMasked128, - ssa.OpAMD64VPMAXSWMasked128, - ssa.OpAMD64VPMINSBMasked256, - ssa.OpAMD64VSQRTPDMasked512, - ssa.OpAMD64VPMINSWMasked256, - ssa.OpAMD64VMINPSMasked256, - ssa.OpAMD64VPMAXUBMasked128, - ssa.OpAMD64VANDPDMasked256, - ssa.OpAMD64VMINPDMasked128, - ssa.OpAMD64VMINPSMasked128, - ssa.OpAMD64VRCP14PSMasked256, - ssa.OpAMD64VPMINSBMasked128, - ssa.OpAMD64VPMULHWMasked256, + ssa.OpAMD64VPOPCNTBMasked128, + ssa.OpAMD64VPOPCNTBMasked256, + ssa.OpAMD64VPOPCNTBMasked512, + ssa.OpAMD64VPADDSWMasked256, + ssa.OpAMD64VPADDSWMasked512, + ssa.OpAMD64VPADDSWMasked128, + ssa.OpAMD64VPADDSBMasked128, + ssa.OpAMD64VPADDSBMasked256, + ssa.OpAMD64VPADDSBMasked512, + ssa.OpAMD64VPSUBSWMasked256, + ssa.OpAMD64VPSUBSWMasked512, + ssa.OpAMD64VPSUBSWMasked128, + ssa.OpAMD64VPSUBSBMasked128, + ssa.OpAMD64VPSUBSBMasked256, + ssa.OpAMD64VPSUBSBMasked512, + ssa.OpAMD64VSQRTPSMasked512, + ssa.OpAMD64VSQRTPSMasked128, ssa.OpAMD64VSQRTPSMasked256, - ssa.OpAMD64VPMAXUDMasked512, - ssa.OpAMD64VPMAXUQMasked512, - ssa.OpAMD64VANDPSMasked128, - 
ssa.OpAMD64VPABSQMasked128, - ssa.OpAMD64VPMAXSQMasked256, - ssa.OpAMD64VPAVGBMasked256, - ssa.OpAMD64VSCALEFPSMasked256: + ssa.OpAMD64VSQRTPDMasked128, + ssa.OpAMD64VSQRTPDMasked256, + ssa.OpAMD64VSQRTPDMasked512, + ssa.OpAMD64VPSUBWMasked256, + ssa.OpAMD64VPSUBWMasked512, + ssa.OpAMD64VPSUBWMasked128, + ssa.OpAMD64VPSUBDMasked512, + ssa.OpAMD64VPSUBDMasked128, + ssa.OpAMD64VPSUBDMasked256, + ssa.OpAMD64VPSUBQMasked128, + ssa.OpAMD64VPSUBQMasked256, + ssa.OpAMD64VPSUBQMasked512, + ssa.OpAMD64VPSUBBMasked128, + ssa.OpAMD64VPSUBBMasked256, + ssa.OpAMD64VPSUBBMasked512, + ssa.OpAMD64VXORPSMasked512, + ssa.OpAMD64VXORPSMasked128, + ssa.OpAMD64VXORPSMasked256, + ssa.OpAMD64VXORPDMasked128, + ssa.OpAMD64VXORPDMasked256, + ssa.OpAMD64VXORPDMasked512, + ssa.OpAMD64VPXORDMasked512, + ssa.OpAMD64VPXORDMasked128, + ssa.OpAMD64VPXORDMasked256, + ssa.OpAMD64VPXORQMasked128, + ssa.OpAMD64VPXORQMasked256, + ssa.OpAMD64VPXORQMasked512: x86.ParseSuffix(p, "Z") } diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules index a273131d46..a9daf27548 100644 --- a/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64.rules @@ -1,1081 +1,1074 @@ // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT. -// The AVX instruction encodings orders vector register from right to left, for example: -// VSUBPS X Y Z means Z=Y-X -// The rules here swapped the order of such X and Y because the ssa to prog lowering in simdssa.go assumes a -// left to right order. -// TODO: we should offload the logic to simdssa.go, instead of here. -// -// Masks are always at the end, immediates always at the beginning. 
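The comment block removed above documents why the old rules swapped operands: AVX encodings order vector sources right to left (VSUBPS X, Y, Z computes Z = Y - X), while the old SSA-to-prog lowering assumed left-to-right order. The regenerated rules keep the natural x y argument order (or forward arguments with ...), which matches the removed TODO: the reversal now has to live in the shape helpers that ssaGenSIMDValue dispatches to. Below is a minimal sketch of what a plain two-input helper could look like, assuming it sits in package amd64 alongside ssaGenSIMDValue with the same imports; the name simdFp21 follows the naming pattern of the helpers above, but the body is an illustration, not the CL's actual implementation:

	// Sketch: two fp-vector inputs, one fp-vector output.
	// Emitting v.Args[1] as the first assembler operand and v.Args[0]
	// as the second preserves dst = Args[0] OP Args[1] despite the
	// right-to-left VEX source order.
	func simdFp21(s *ssagen.State, v *ssa.Value) *obj.Prog {
		p := s.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = simdReg(v.Args[1])
		p.AddRestSourceReg(simdReg(v.Args[0]))
		p.To.Type = obj.TYPE_REG
		p.To.Reg = simdReg(v)
		return p
	}

With that shape in place, a non-commutative rule such as (SubFloat32x4 x y) can forward its operands unchanged and the lowering still computes x - y.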
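Back in the simdssa.go hunk above, the second switch forces zeroing-masking on every masked op via x86.ParseSuffix(p, "Z"). A two-line illustration, with a placeholder opcode (the real code attaches the suffix to whichever masked prog was just built):

	p := s.Prog(x86.AVPADDD) // placeholder: any masked AVX-512 op
	x86.ParseSuffix(p, "Z")  // VPADDD.Z: lanes with a clear mask bit are zeroed rather than merged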
-(AddFloat32x16 x y) => (VADDPS512 y x) -(AndFloat32x16 x y) => (VANDPS512 y x) -(AndNotFloat32x16 x y) => (VANDNPS512 y x) -(ApproximateReciprocalFloat32x16 x) => (VRCP14PS512 x) -(ApproximateReciprocalOfSqrtFloat32x16 x) => (VRSQRT14PS512 x) -(DivFloat32x16 x y) => (VDIVPS512 y x) -(MaxFloat32x16 x y) => (VMAXPS512 y x) -(MinFloat32x16 x y) => (VMINPS512 y x) -(MulFloat32x16 x y) => (VMULPS512 y x) -(MulByPowOf2Float32x16 x y) => (VSCALEFPS512 y x) -(OrFloat32x16 x y) => (VORPS512 y x) -(SqrtFloat32x16 x) => (VSQRTPS512 x) -(SubFloat32x16 x y) => (VADDPS512 y x) -(XorFloat32x16 x y) => (VXORPS512 y x) -(AddFloat32x4 x y) => (VADDPS128 y x) -(AndFloat32x4 x y) => (VANDPS128 y x) -(AndNotFloat32x4 x y) => (VANDNPS128 y x) -(ApproximateReciprocalFloat32x4 x) => (VRCP14PS128 x) -(ApproximateReciprocalOfSqrtFloat32x4 x) => (VRSQRTPS128 x) -(DivFloat32x4 x y) => (VDIVPS128 y x) -(MaxFloat32x4 x y) => (VMAXPS128 y x) -(MinFloat32x4 x y) => (VMINPS128 y x) -(MulFloat32x4 x y) => (VMULPS128 y x) -(MulByPowOf2Float32x4 x y) => (VSCALEFPS128 y x) -(OrFloat32x4 x y) => (VORPS128 y x) -(PairwiseAddFloat32x4 x y) => (VHADDPS128 y x) -(PairwiseSubFloat32x4 x y) => (VHSUBPS128 y x) -(SqrtFloat32x4 x) => (VSQRTPS128 x) -(SubFloat32x4 x y) => (VADDPS128 y x) -(XorFloat32x4 x y) => (VXORPS128 y x) -(AddFloat32x8 x y) => (VADDPS256 y x) -(AndFloat32x8 x y) => (VANDPS256 y x) -(AndNotFloat32x8 x y) => (VANDNPS256 y x) -(ApproximateReciprocalFloat32x8 x) => (VRCP14PS256 x) -(ApproximateReciprocalOfSqrtFloat32x8 x) => (VRSQRTPS256 x) -(DivFloat32x8 x y) => (VDIVPS256 y x) -(MaxFloat32x8 x y) => (VMAXPS256 y x) -(MinFloat32x8 x y) => (VMINPS256 y x) -(MulFloat32x8 x y) => (VMULPS256 y x) -(MulByPowOf2Float32x8 x y) => (VSCALEFPS256 y x) -(OrFloat32x8 x y) => (VORPS256 y x) -(PairwiseAddFloat32x8 x y) => (VHADDPS256 y x) -(PairwiseSubFloat32x8 x y) => (VHSUBPS256 y x) -(SqrtFloat32x8 x) => (VSQRTPS256 x) -(SubFloat32x8 x y) => (VADDPS256 y x) -(XorFloat32x8 x y) => (VXORPS256 y x) -(AddFloat64x2 x y) => (VADDPD128 y x) -(AndFloat64x2 x y) => (VANDPD128 y x) -(AndNotFloat64x2 x y) => (VANDNPD128 y x) -(ApproximateReciprocalFloat64x2 x) => (VRCP14PD128 x) -(ApproximateReciprocalOfSqrtFloat64x2 x) => (VRSQRT14PD128 x) -(DivFloat64x2 x y) => (VDIVPD128 y x) -(MaxFloat64x2 x y) => (VMAXPD128 y x) -(MinFloat64x2 x y) => (VMINPD128 y x) -(MulFloat64x2 x y) => (VMULPD128 y x) -(MulByPowOf2Float64x2 x y) => (VSCALEFPD128 y x) -(OrFloat64x2 x y) => (VORPD128 y x) -(PairwiseAddFloat64x2 x y) => (VHADDPD128 y x) -(PairwiseSubFloat64x2 x y) => (VHSUBPD128 y x) -(SqrtFloat64x2 x) => (VSQRTPD128 x) -(SubFloat64x2 x y) => (VADDPD128 y x) -(XorFloat64x2 x y) => (VXORPD128 y x) -(AddFloat64x4 x y) => (VADDPD256 y x) -(AndFloat64x4 x y) => (VANDPD256 y x) -(AndNotFloat64x4 x y) => (VANDNPD256 y x) -(ApproximateReciprocalFloat64x4 x) => (VRCP14PD256 x) -(ApproximateReciprocalOfSqrtFloat64x4 x) => (VRSQRT14PD256 x) -(DivFloat64x4 x y) => (VDIVPD256 y x) -(MaxFloat64x4 x y) => (VMAXPD256 y x) -(MinFloat64x4 x y) => (VMINPD256 y x) -(MulFloat64x4 x y) => (VMULPD256 y x) -(MulByPowOf2Float64x4 x y) => (VSCALEFPD256 y x) -(OrFloat64x4 x y) => (VORPD256 y x) -(PairwiseAddFloat64x4 x y) => (VHADDPD256 y x) -(PairwiseSubFloat64x4 x y) => (VHSUBPD256 y x) -(SqrtFloat64x4 x) => (VSQRTPD256 x) -(SubFloat64x4 x y) => (VADDPD256 y x) -(XorFloat64x4 x y) => (VXORPD256 y x) -(AddFloat64x8 x y) => (VADDPD512 y x) -(AndFloat64x8 x y) => (VANDPD512 y x) -(AndNotFloat64x8 x y) => (VANDNPD512 y x) -(ApproximateReciprocalFloat64x8 x) => (VRCP14PD512 x) 
-(ApproximateReciprocalOfSqrtFloat64x8 x) => (VRSQRT14PD512 x) -(DivFloat64x8 x y) => (VDIVPD512 y x) -(MaxFloat64x8 x y) => (VMAXPD512 y x) -(MinFloat64x8 x y) => (VMINPD512 y x) -(MulFloat64x8 x y) => (VMULPD512 y x) -(MulByPowOf2Float64x8 x y) => (VSCALEFPD512 y x) -(OrFloat64x8 x y) => (VORPD512 y x) -(SqrtFloat64x8 x) => (VSQRTPD512 x) -(SubFloat64x8 x y) => (VADDPD512 y x) -(XorFloat64x8 x y) => (VXORPD512 y x) -(AbsoluteInt16x16 x) => (VPABSW256 x) -(AddInt16x16 x y) => (VPADDW256 y x) -(AndInt16x16 x y) => (VPAND256 y x) -(AndNotInt16x16 x y) => (VPANDN256 y x) -(EqualInt16x16 x y) => (VPCMPEQW256 y x) -(GreaterInt16x16 x y) => (VPCMPGTW256 y x) -(MaxInt16x16 x y) => (VPMAXSW256 y x) -(MinInt16x16 x y) => (VPMINSW256 y x) -(MulHighInt16x16 x y) => (VPMULHW256 y x) -(MulLowInt16x16 x y) => (VPMULLW256 y x) -(OrInt16x16 x y) => (VPOR256 y x) -(PairwiseAddInt16x16 x y) => (VPHADDW256 y x) -(PairwiseSubInt16x16 x y) => (VPHSUBW256 y x) -(PopCountInt16x16 x) => (VPOPCNTW256 x) -(SaturatedAddInt16x16 x y) => (VPADDSW256 y x) -(SaturatedPairwiseAddInt16x16 x y) => (VPHADDSW256 y x) -(SaturatedPairwiseSubInt16x16 x y) => (VPHSUBSW256 y x) -(SaturatedSubInt16x16 x y) => (VPSUBSW256 y x) -(SignInt16x16 x y) => (VPSIGNW256 y x) -(SubInt16x16 x y) => (VPSUBW256 y x) -(XorInt16x16 x y) => (VPXOR256 y x) -(AbsoluteInt16x32 x) => (VPABSW512 x) -(AddInt16x32 x y) => (VPADDW512 y x) -(MaxInt16x32 x y) => (VPMAXSW512 y x) -(MinInt16x32 x y) => (VPMINSW512 y x) -(MulHighInt16x32 x y) => (VPMULHW512 y x) -(MulLowInt16x32 x y) => (VPMULLW512 y x) -(PopCountInt16x32 x) => (VPOPCNTW512 x) -(SaturatedAddInt16x32 x y) => (VPADDSW512 y x) -(SaturatedSubInt16x32 x y) => (VPSUBSW512 y x) -(SubInt16x32 x y) => (VPSUBW512 y x) -(AbsoluteInt16x8 x) => (VPABSW128 x) -(AddInt16x8 x y) => (VPADDW128 y x) -(AndInt16x8 x y) => (VPAND128 y x) -(AndNotInt16x8 x y) => (VPANDN128 y x) -(EqualInt16x8 x y) => (VPCMPEQW128 y x) -(GreaterInt16x8 x y) => (VPCMPGTW128 y x) -(MaxInt16x8 x y) => (VPMAXSW128 y x) -(MinInt16x8 x y) => (VPMINSW128 y x) -(MulHighInt16x8 x y) => (VPMULHW128 y x) -(MulLowInt16x8 x y) => (VPMULLW128 y x) -(OrInt16x8 x y) => (VPOR128 y x) -(PairwiseAddInt16x8 x y) => (VPHADDW128 y x) -(PairwiseSubInt16x8 x y) => (VPHSUBW128 y x) -(PopCountInt16x8 x) => (VPOPCNTW128 x) -(SaturatedAddInt16x8 x y) => (VPADDSW128 y x) -(SaturatedPairwiseAddInt16x8 x y) => (VPHADDSW128 y x) -(SaturatedPairwiseSubInt16x8 x y) => (VPHSUBSW128 y x) -(SaturatedSubInt16x8 x y) => (VPSUBSW128 y x) -(SignInt16x8 x y) => (VPSIGNW128 y x) -(SubInt16x8 x y) => (VPSUBW128 y x) -(XorInt16x8 x y) => (VPXOR128 y x) -(AbsoluteInt32x16 x) => (VPABSD512 x) -(AddInt32x16 x y) => (VPADDD512 y x) -(AndInt32x16 x y) => (VPANDD512 y x) -(AndNotInt32x16 x y) => (VPANDND512 y x) -(MaxInt32x16 x y) => (VPMAXSD512 y x) -(MinInt32x16 x y) => (VPMINSD512 y x) -(MulLowInt32x16 x y) => (VPMULLD512 y x) -(OrInt32x16 x y) => (VPORD512 y x) -(PopCountInt32x16 x) => (VPOPCNTD512 x) -(SubInt32x16 x y) => (VPSUBD512 y x) -(XorInt32x16 x y) => (VPXORD512 y x) -(AbsoluteInt32x4 x) => (VPABSD128 x) -(AddInt32x4 x y) => (VPADDD128 y x) -(AndInt32x4 x y) => (VPAND128 y x) -(AndNotInt32x4 x y) => (VPANDN128 y x) -(EqualInt32x4 x y) => (VPCMPEQD128 y x) -(GreaterInt32x4 x y) => (VPCMPGTD128 y x) -(MaxInt32x4 x y) => (VPMAXSD128 y x) -(MinInt32x4 x y) => (VPMINSD128 y x) -(MulEvenWidenInt32x4 x y) => (VPMULDQ128 y x) -(MulLowInt32x4 x y) => (VPMULLD128 y x) -(OrInt32x4 x y) => (VPOR128 y x) -(PairwiseAddInt32x4 x y) => (VPHADDD128 y x) -(PairwiseSubInt32x4 x y) => 
(VPHSUBD128 y x) -(PopCountInt32x4 x) => (VPOPCNTD128 x) -(SignInt32x4 x y) => (VPSIGND128 y x) -(SubInt32x4 x y) => (VPSUBD128 y x) -(XorInt32x4 x y) => (VPXOR128 y x) -(AbsoluteInt32x8 x) => (VPABSD256 x) -(AddInt32x8 x y) => (VPADDD256 y x) -(AndInt32x8 x y) => (VPAND256 y x) -(AndNotInt32x8 x y) => (VPANDN256 y x) -(EqualInt32x8 x y) => (VPCMPEQD256 y x) -(GreaterInt32x8 x y) => (VPCMPGTD256 y x) -(MaxInt32x8 x y) => (VPMAXSD256 y x) -(MinInt32x8 x y) => (VPMINSD256 y x) -(MulEvenWidenInt32x8 x y) => (VPMULDQ256 y x) -(MulLowInt32x8 x y) => (VPMULLD256 y x) -(OrInt32x8 x y) => (VPOR256 y x) -(PairwiseAddInt32x8 x y) => (VPHADDD256 y x) -(PairwiseSubInt32x8 x y) => (VPHSUBD256 y x) -(PopCountInt32x8 x) => (VPOPCNTD256 x) -(SignInt32x8 x y) => (VPSIGND256 y x) -(SubInt32x8 x y) => (VPSUBD256 y x) -(XorInt32x8 x y) => (VPXOR256 y x) -(AbsoluteInt64x2 x) => (VPABSQ128 x) -(AddInt64x2 x y) => (VPADDQ128 y x) -(AndInt64x2 x y) => (VPAND128 y x) -(AndNotInt64x2 x y) => (VPANDN128 y x) -(EqualInt64x2 x y) => (VPCMPEQQ128 y x) -(MaxInt64x2 x y) => (VPMAXSQ128 y x) -(MinInt64x2 x y) => (VPMINSQ128 y x) -(MulEvenWidenInt64x2 x y) => (VPMULDQ128 y x) -(MulLowInt64x2 x y) => (VPMULLQ128 y x) -(OrInt64x2 x y) => (VPOR128 y x) -(PopCountInt64x2 x) => (VPOPCNTQ128 x) -(SubInt64x2 x y) => (VPSUBQ128 y x) -(XorInt64x2 x y) => (VPXOR128 y x) -(AbsoluteInt64x4 x) => (VPABSQ256 x) -(AddInt64x4 x y) => (VPADDQ256 y x) -(AndInt64x4 x y) => (VPAND256 y x) -(AndNotInt64x4 x y) => (VPANDN256 y x) -(EqualInt64x4 x y) => (VPCMPEQQ256 y x) -(GreaterInt64x4 x y) => (VPCMPGTQ256 y x) -(MaxInt64x4 x y) => (VPMAXSQ256 y x) -(MinInt64x4 x y) => (VPMINSQ256 y x) -(MulEvenWidenInt64x4 x y) => (VPMULDQ256 y x) -(MulLowInt64x4 x y) => (VPMULLQ256 y x) -(OrInt64x4 x y) => (VPOR256 y x) -(PopCountInt64x4 x) => (VPOPCNTQ256 x) -(SubInt64x4 x y) => (VPSUBQ256 y x) -(XorInt64x4 x y) => (VPXOR256 y x) -(AbsoluteInt64x8 x) => (VPABSQ512 x) -(AddInt64x8 x y) => (VPADDQ512 y x) -(AndInt64x8 x y) => (VPANDQ512 y x) -(AndNotInt64x8 x y) => (VPANDNQ512 y x) -(MaxInt64x8 x y) => (VPMAXSQ512 y x) -(MinInt64x8 x y) => (VPMINSQ512 y x) -(MulEvenWidenInt64x8 x y) => (VPMULDQ512 y x) -(MulLowInt64x8 x y) => (VPMULLQ512 y x) -(OrInt64x8 x y) => (VPORQ512 y x) -(PopCountInt64x8 x) => (VPOPCNTQ512 x) -(SubInt64x8 x y) => (VPSUBQ512 y x) -(XorInt64x8 x y) => (VPXORQ512 y x) -(AbsoluteInt8x16 x) => (VPABSB128 x) -(AddInt8x16 x y) => (VPADDB128 y x) -(AndInt8x16 x y) => (VPAND128 y x) -(AndNotInt8x16 x y) => (VPANDN128 y x) -(EqualInt8x16 x y) => (VPCMPEQB128 y x) -(GreaterInt8x16 x y) => (VPCMPGTB128 y x) -(MaxInt8x16 x y) => (VPMAXSB128 y x) -(MinInt8x16 x y) => (VPMINSB128 y x) -(OrInt8x16 x y) => (VPOR128 y x) -(PopCountInt8x16 x) => (VPOPCNTB128 x) -(SaturatedAddInt8x16 x y) => (VPADDSB128 y x) -(SaturatedSubInt8x16 x y) => (VPSUBSB128 y x) -(SignInt8x16 x y) => (VPSIGNB128 y x) -(SubInt8x16 x y) => (VPSUBB128 y x) -(XorInt8x16 x y) => (VPXOR128 y x) -(AbsoluteInt8x32 x) => (VPABSB256 x) -(AddInt8x32 x y) => (VPADDB256 y x) -(AndInt8x32 x y) => (VPAND256 y x) -(AndNotInt8x32 x y) => (VPANDN256 y x) -(EqualInt8x32 x y) => (VPCMPEQB256 y x) -(GreaterInt8x32 x y) => (VPCMPGTB256 y x) -(MaxInt8x32 x y) => (VPMAXSB256 y x) -(MinInt8x32 x y) => (VPMINSB256 y x) -(OrInt8x32 x y) => (VPOR256 y x) -(PopCountInt8x32 x) => (VPOPCNTB256 x) -(SaturatedAddInt8x32 x y) => (VPADDSB256 y x) -(SaturatedSubInt8x32 x y) => (VPSUBSB256 y x) -(SignInt8x32 x y) => (VPSIGNB256 y x) -(SubInt8x32 x y) => (VPSUBB256 y x) -(XorInt8x32 x y) => (VPXOR256 y x) 
-(AbsoluteInt8x64 x) => (VPABSB512 x) -(AddInt8x64 x y) => (VPADDB512 y x) -(MaxInt8x64 x y) => (VPMAXSB512 y x) -(MinInt8x64 x y) => (VPMINSB512 y x) -(PopCountInt8x64 x) => (VPOPCNTB512 x) -(SaturatedAddInt8x64 x y) => (VPADDSB512 y x) -(SaturatedSubInt8x64 x y) => (VPSUBSB512 y x) -(SubInt8x64 x y) => (VPSUBB512 y x) -(AddUint16x16 x y) => (VPADDW256 y x) -(AndUint16x16 x y) => (VPAND256 y x) -(AndNotUint16x16 x y) => (VPANDN256 y x) -(AverageUint16x16 x y) => (VPAVGW256 y x) -(MaxUint16x16 x y) => (VPMAXUW256 y x) -(MinUint16x16 x y) => (VPMINUW256 y x) -(MulHighUint16x16 x y) => (VPMULHUW256 y x) -(OrUint16x16 x y) => (VPOR256 y x) -(PairwiseAddUint16x16 x y) => (VPHADDW256 y x) -(PairwiseSubUint16x16 x y) => (VPHSUBW256 y x) -(PopCountUint16x16 x) => (VPOPCNTW256 x) -(SaturatedAddUint16x16 x y) => (VPADDSW256 y x) -(SaturatedSubUint16x16 x y) => (VPSUBSW256 y x) -(SubUint16x16 x y) => (VPSUBW256 y x) -(XorUint16x16 x y) => (VPXOR256 y x) -(AddUint16x32 x y) => (VPADDW512 y x) -(AverageUint16x32 x y) => (VPAVGW512 y x) -(MaxUint16x32 x y) => (VPMAXUW512 y x) -(MinUint16x32 x y) => (VPMINUW512 y x) -(MulHighUint16x32 x y) => (VPMULHUW512 y x) -(PopCountUint16x32 x) => (VPOPCNTW512 x) -(SaturatedAddUint16x32 x y) => (VPADDSW512 y x) -(SaturatedSubUint16x32 x y) => (VPSUBSW512 y x) -(SubUint16x32 x y) => (VPSUBW512 y x) -(AddUint16x8 x y) => (VPADDW128 y x) -(AndUint16x8 x y) => (VPAND128 y x) -(AndNotUint16x8 x y) => (VPANDN128 y x) -(AverageUint16x8 x y) => (VPAVGW128 y x) -(MaxUint16x8 x y) => (VPMAXUW128 y x) -(MinUint16x8 x y) => (VPMINUW128 y x) -(MulHighUint16x8 x y) => (VPMULHUW128 y x) -(OrUint16x8 x y) => (VPOR128 y x) -(PairwiseAddUint16x8 x y) => (VPHADDW128 y x) -(PairwiseSubUint16x8 x y) => (VPHSUBW128 y x) -(PopCountUint16x8 x) => (VPOPCNTW128 x) -(SaturatedAddUint16x8 x y) => (VPADDSW128 y x) -(SaturatedSubUint16x8 x y) => (VPSUBSW128 y x) -(SubUint16x8 x y) => (VPSUBW128 y x) -(XorUint16x8 x y) => (VPXOR128 y x) -(AddUint32x16 x y) => (VPADDD512 y x) -(AndUint32x16 x y) => (VPANDD512 y x) -(AndNotUint32x16 x y) => (VPANDND512 y x) -(MaxUint32x16 x y) => (VPMAXUD512 y x) -(MinUint32x16 x y) => (VPMINUD512 y x) -(OrUint32x16 x y) => (VPORD512 y x) -(PopCountUint32x16 x) => (VPOPCNTD512 x) -(SubUint32x16 x y) => (VPSUBD512 y x) -(XorUint32x16 x y) => (VPXORD512 y x) -(AddUint32x4 x y) => (VPADDD128 y x) -(AndUint32x4 x y) => (VPAND128 y x) -(AndNotUint32x4 x y) => (VPANDN128 y x) -(MaxUint32x4 x y) => (VPMAXUD128 y x) -(MinUint32x4 x y) => (VPMINUD128 y x) -(MulEvenWidenUint32x4 x y) => (VPMULUDQ128 y x) -(OrUint32x4 x y) => (VPOR128 y x) -(PairwiseAddUint32x4 x y) => (VPHADDD128 y x) -(PairwiseSubUint32x4 x y) => (VPHSUBD128 y x) -(PopCountUint32x4 x) => (VPOPCNTD128 x) -(SubUint32x4 x y) => (VPSUBD128 y x) -(XorUint32x4 x y) => (VPXOR128 y x) -(AddUint32x8 x y) => (VPADDD256 y x) -(AndUint32x8 x y) => (VPAND256 y x) -(AndNotUint32x8 x y) => (VPANDN256 y x) -(MaxUint32x8 x y) => (VPMAXUD256 y x) -(MinUint32x8 x y) => (VPMINUD256 y x) -(MulEvenWidenUint32x8 x y) => (VPMULUDQ256 y x) -(OrUint32x8 x y) => (VPOR256 y x) -(PairwiseAddUint32x8 x y) => (VPHADDD256 y x) -(PairwiseSubUint32x8 x y) => (VPHSUBD256 y x) -(PopCountUint32x8 x) => (VPOPCNTD256 x) -(SubUint32x8 x y) => (VPSUBD256 y x) -(XorUint32x8 x y) => (VPXOR256 y x) -(AddUint64x2 x y) => (VPADDQ128 y x) -(AndUint64x2 x y) => (VPAND128 y x) -(AndNotUint64x2 x y) => (VPANDN128 y x) -(MaxUint64x2 x y) => (VPMAXUQ128 y x) -(MinUint64x2 x y) => (VPMINUQ128 y x) -(MulEvenWidenUint64x2 x y) => (VPMULUDQ128 y x) -(OrUint64x2 
x y) => (VPOR128 y x) -(PopCountUint64x2 x) => (VPOPCNTQ128 x) -(SubUint64x2 x y) => (VPSUBQ128 y x) -(XorUint64x2 x y) => (VPXOR128 y x) -(AddUint64x4 x y) => (VPADDQ256 y x) -(AndUint64x4 x y) => (VPAND256 y x) -(AndNotUint64x4 x y) => (VPANDN256 y x) -(MaxUint64x4 x y) => (VPMAXUQ256 y x) -(MinUint64x4 x y) => (VPMINUQ256 y x) -(MulEvenWidenUint64x4 x y) => (VPMULUDQ256 y x) -(OrUint64x4 x y) => (VPOR256 y x) -(PopCountUint64x4 x) => (VPOPCNTQ256 x) -(SubUint64x4 x y) => (VPSUBQ256 y x) -(XorUint64x4 x y) => (VPXOR256 y x) -(AddUint64x8 x y) => (VPADDQ512 y x) -(AndUint64x8 x y) => (VPANDQ512 y x) -(AndNotUint64x8 x y) => (VPANDNQ512 y x) -(MaxUint64x8 x y) => (VPMAXUQ512 y x) -(MinUint64x8 x y) => (VPMINUQ512 y x) -(MulEvenWidenUint64x8 x y) => (VPMULUDQ512 y x) -(OrUint64x8 x y) => (VPORQ512 y x) -(PopCountUint64x8 x) => (VPOPCNTQ512 x) -(SubUint64x8 x y) => (VPSUBQ512 y x) -(XorUint64x8 x y) => (VPXORQ512 y x) -(AddUint8x16 x y) => (VPADDB128 y x) -(AndUint8x16 x y) => (VPAND128 y x) -(AndNotUint8x16 x y) => (VPANDN128 y x) -(AverageUint8x16 x y) => (VPAVGB128 y x) -(MaxUint8x16 x y) => (VPMAXUB128 y x) -(MinUint8x16 x y) => (VPMINUB128 y x) -(OrUint8x16 x y) => (VPOR128 y x) -(PopCountUint8x16 x) => (VPOPCNTB128 x) -(SaturatedAddUint8x16 x y) => (VPADDSB128 y x) -(SaturatedSubUint8x16 x y) => (VPSUBSB128 y x) -(SubUint8x16 x y) => (VPSUBB128 y x) -(XorUint8x16 x y) => (VPXOR128 y x) -(AddUint8x32 x y) => (VPADDB256 y x) -(AndUint8x32 x y) => (VPAND256 y x) -(AndNotUint8x32 x y) => (VPANDN256 y x) -(AverageUint8x32 x y) => (VPAVGB256 y x) -(MaxUint8x32 x y) => (VPMAXUB256 y x) -(MinUint8x32 x y) => (VPMINUB256 y x) -(OrUint8x32 x y) => (VPOR256 y x) -(PopCountUint8x32 x) => (VPOPCNTB256 x) -(SaturatedAddUint8x32 x y) => (VPADDSB256 y x) -(SaturatedSubUint8x32 x y) => (VPSUBSB256 y x) -(SubUint8x32 x y) => (VPSUBB256 y x) -(XorUint8x32 x y) => (VPXOR256 y x) -(AddUint8x64 x y) => (VPADDB512 y x) -(AverageUint8x64 x y) => (VPAVGB512 y x) -(MaxUint8x64 x y) => (VPMAXUB512 y x) -(MinUint8x64 x y) => (VPMINUB512 y x) -(PopCountUint8x64 x) => (VPOPCNTB512 x) -(SaturatedAddUint8x64 x y) => (VPADDSB512 y x) -(SaturatedSubUint8x64 x y) => (VPSUBSB512 y x) -(SubUint8x64 x y) => (VPSUBB512 y x) -(EqualFloat32x4 x y) => (VCMPPS128 [0] y x) -(EqualFloat64x4 x y) => (VCMPPD256 [0] y x) -(EqualFloat32x8 x y) => (VCMPPS256 [0] y x) -(EqualFloat64x2 x y) => (VCMPPD128 [0] y x) -(GreaterFloat32x8 x y) => (VCMPPS256 [6] y x) -(GreaterFloat64x4 x y) => (VCMPPD256 [6] y x) -(GreaterFloat64x2 x y) => (VCMPPD128 [6] y x) -(GreaterFloat32x4 x y) => (VCMPPS128 [6] y x) -(GreaterEqualFloat64x4 x y) => (VCMPPD256 [5] y x) -(GreaterEqualFloat32x8 x y) => (VCMPPS256 [5] y x) -(GreaterEqualFloat32x4 x y) => (VCMPPS128 [5] y x) -(GreaterEqualFloat64x2 x y) => (VCMPPD128 [5] y x) -(IsNanFloat32x8 x y) => (VCMPPS256 [3] y x) -(IsNanFloat64x2 x y) => (VCMPPD128 [3] y x) -(IsNanFloat32x4 x y) => (VCMPPS128 [3] y x) -(IsNanFloat64x4 x y) => (VCMPPD256 [3] y x) -(LessFloat32x4 x y) => (VCMPPS128 [1] y x) -(LessFloat64x4 x y) => (VCMPPD256 [1] y x) -(LessFloat64x2 x y) => (VCMPPD128 [1] y x) -(LessFloat32x8 x y) => (VCMPPS256 [1] y x) -(LessEqualFloat32x4 x y) => (VCMPPS128 [2] y x) -(LessEqualFloat64x4 x y) => (VCMPPD256 [2] y x) -(LessEqualFloat64x2 x y) => (VCMPPD128 [2] y x) -(LessEqualFloat32x8 x y) => (VCMPPS256 [2] y x) -(NotEqualFloat64x2 x y) => (VCMPPD128 [4] y x) -(NotEqualFloat32x4 x y) => (VCMPPS128 [4] y x) -(NotEqualFloat32x8 x y) => (VCMPPS256 [4] y x) -(NotEqualFloat64x4 x y) => (VCMPPD256 [4] y x) 
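The immediates on the compare rules above follow the x86 VCMPPS/VCMPPD (and AVX-512 VPCMP/VPCMPU) predicate encoding; the mapping visible in the rules is Equal→0, Less→1, LessEqual→2, IsNan→3, NotEqual→4, GreaterEqual→5, Greater→6. A small Go reference block for cross-checking; the constant names are hypothetical, since the generated rules carry bare AuxInt literals:

	// Compare-predicate immediates used by the VCMP*/VPCMP* rules.
	// Names are illustrative only.
	const (
		simdCmpEQ    = 0 // Equal
		simdCmpLT    = 1 // Less
		simdCmpLE    = 2 // LessEqual
		simdCmpUnord = 3 // IsNan (unordered: true if either lane is NaN)
		simdCmpNEQ   = 4 // NotEqual
		simdCmpGE    = 5 // GreaterEqual (encoded as not-less-than)
		simdCmpGT    = 6 // Greater (encoded as not-less-or-equal)
	)

The regenerated rules that follow keep the same immediates; the 512-bit and unsigned variants additionally route the result through VPMOVMToVec*, since those comparisons produce an AVX-512 mask register rather than a vector.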
-(MaskedAddFloat32x16 x y mask) => (VADDPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAndFloat32x16 x y mask) => (VANDPSMasked512 y x (VPMOVVec32x16ToM mask)) -(MaskedAndNotFloat32x16 x y mask) => (VANDNPSMasked512 y x (VPMOVVec32x16ToM mask)) +(AbsoluteInt16x16 ...) => (VPABSW256 ...) +(AbsoluteInt16x32 ...) => (VPABSW512 ...) +(AbsoluteInt16x8 ...) => (VPABSW128 ...) +(AbsoluteInt32x16 ...) => (VPABSD512 ...) +(AbsoluteInt32x4 ...) => (VPABSD128 ...) +(AbsoluteInt32x8 ...) => (VPABSD256 ...) +(AbsoluteInt64x2 ...) => (VPABSQ128 ...) +(AbsoluteInt64x4 ...) => (VPABSQ256 ...) +(AbsoluteInt64x8 ...) => (VPABSQ512 ...) +(AbsoluteInt8x16 ...) => (VPABSB128 ...) +(AbsoluteInt8x32 ...) => (VPABSB256 ...) +(AbsoluteInt8x64 ...) => (VPABSB512 ...) +(AddFloat32x16 ...) => (VADDPS512 ...) +(AddFloat32x4 ...) => (VADDPS128 ...) +(AddFloat32x8 ...) => (VADDPS256 ...) +(AddFloat64x2 ...) => (VADDPD128 ...) +(AddFloat64x4 ...) => (VADDPD256 ...) +(AddFloat64x8 ...) => (VADDPD512 ...) +(AddInt16x16 ...) => (VPADDW256 ...) +(AddInt16x32 ...) => (VPADDW512 ...) +(AddInt16x8 ...) => (VPADDW128 ...) +(AddInt32x16 ...) => (VPADDD512 ...) +(AddInt32x4 ...) => (VPADDD128 ...) +(AddInt32x8 ...) => (VPADDD256 ...) +(AddInt64x2 ...) => (VPADDQ128 ...) +(AddInt64x4 ...) => (VPADDQ256 ...) +(AddInt64x8 ...) => (VPADDQ512 ...) +(AddInt8x16 ...) => (VPADDB128 ...) +(AddInt8x32 ...) => (VPADDB256 ...) +(AddInt8x64 ...) => (VPADDB512 ...) +(AddUint16x16 ...) => (VPADDW256 ...) +(AddUint16x32 ...) => (VPADDW512 ...) +(AddUint16x8 ...) => (VPADDW128 ...) +(AddUint32x16 ...) => (VPADDD512 ...) +(AddUint32x4 ...) => (VPADDD128 ...) +(AddUint32x8 ...) => (VPADDD256 ...) +(AddUint64x2 ...) => (VPADDQ128 ...) +(AddUint64x4 ...) => (VPADDQ256 ...) +(AddUint64x8 ...) => (VPADDQ512 ...) +(AddUint8x16 ...) => (VPADDB128 ...) +(AddUint8x32 ...) => (VPADDB256 ...) +(AddUint8x64 ...) => (VPADDB512 ...) +(AndFloat32x16 ...) => (VANDPS512 ...) +(AndFloat32x4 ...) => (VANDPS128 ...) +(AndFloat32x8 ...) => (VANDPS256 ...) +(AndFloat64x2 ...) => (VANDPD128 ...) +(AndFloat64x4 ...) => (VANDPD256 ...) +(AndFloat64x8 ...) => (VANDPD512 ...) +(AndInt16x16 ...) => (VPAND256 ...) +(AndInt16x8 ...) => (VPAND128 ...) +(AndInt32x16 ...) => (VPANDD512 ...) +(AndInt32x4 ...) => (VPAND128 ...) +(AndInt32x8 ...) => (VPAND256 ...) +(AndInt64x2 ...) => (VPAND128 ...) +(AndInt64x4 ...) => (VPAND256 ...) +(AndInt64x8 ...) => (VPANDQ512 ...) +(AndInt8x16 ...) => (VPAND128 ...) +(AndInt8x32 ...) => (VPAND256 ...) +(AndUint16x16 ...) => (VPAND256 ...) +(AndUint16x8 ...) => (VPAND128 ...) +(AndUint32x16 ...) => (VPANDD512 ...) +(AndUint32x4 ...) => (VPAND128 ...) +(AndUint32x8 ...) => (VPAND256 ...) +(AndUint64x2 ...) => (VPAND128 ...) +(AndUint64x4 ...) => (VPAND256 ...) +(AndUint64x8 ...) => (VPANDQ512 ...) +(AndUint8x16 ...) => (VPAND128 ...) +(AndUint8x32 ...) => (VPAND256 ...) +(AndNotFloat32x16 ...) => (VANDNPS512 ...) +(AndNotFloat32x4 ...) => (VANDNPS128 ...) +(AndNotFloat32x8 ...) => (VANDNPS256 ...) +(AndNotFloat64x2 ...) => (VANDNPD128 ...) +(AndNotFloat64x4 ...) => (VANDNPD256 ...) +(AndNotFloat64x8 ...) => (VANDNPD512 ...) +(AndNotInt16x16 ...) => (VPANDN256 ...) +(AndNotInt16x8 ...) => (VPANDN128 ...) +(AndNotInt32x16 ...) => (VPANDND512 ...) +(AndNotInt32x4 ...) => (VPANDN128 ...) +(AndNotInt32x8 ...) => (VPANDN256 ...) +(AndNotInt64x2 ...) => (VPANDN128 ...) +(AndNotInt64x4 ...) => (VPANDN256 ...) +(AndNotInt64x8 ...) => (VPANDNQ512 ...) +(AndNotInt8x16 ...) => (VPANDN128 ...) +(AndNotInt8x32 ...) => (VPANDN256 ...) +(AndNotUint16x16 ...) 
=> (VPANDN256 ...) +(AndNotUint16x8 ...) => (VPANDN128 ...) +(AndNotUint32x16 ...) => (VPANDND512 ...) +(AndNotUint32x4 ...) => (VPANDN128 ...) +(AndNotUint32x8 ...) => (VPANDN256 ...) +(AndNotUint64x2 ...) => (VPANDN128 ...) +(AndNotUint64x4 ...) => (VPANDN256 ...) +(AndNotUint64x8 ...) => (VPANDNQ512 ...) +(AndNotUint8x16 ...) => (VPANDN128 ...) +(AndNotUint8x32 ...) => (VPANDN256 ...) +(ApproximateReciprocalFloat32x16 ...) => (VRCP14PS512 ...) +(ApproximateReciprocalFloat32x4 ...) => (VRCP14PS128 ...) +(ApproximateReciprocalFloat32x8 ...) => (VRCP14PS256 ...) +(ApproximateReciprocalFloat64x2 ...) => (VRCP14PD128 ...) +(ApproximateReciprocalFloat64x4 ...) => (VRCP14PD256 ...) +(ApproximateReciprocalFloat64x8 ...) => (VRCP14PD512 ...) +(ApproximateReciprocalOfSqrtFloat32x16 ...) => (VRSQRT14PS512 ...) +(ApproximateReciprocalOfSqrtFloat32x4 ...) => (VRSQRTPS128 ...) +(ApproximateReciprocalOfSqrtFloat32x8 ...) => (VRSQRTPS256 ...) +(ApproximateReciprocalOfSqrtFloat64x2 ...) => (VRSQRT14PD128 ...) +(ApproximateReciprocalOfSqrtFloat64x4 ...) => (VRSQRT14PD256 ...) +(ApproximateReciprocalOfSqrtFloat64x8 ...) => (VRSQRT14PD512 ...) +(AverageUint16x16 ...) => (VPAVGW256 ...) +(AverageUint16x32 ...) => (VPAVGW512 ...) +(AverageUint16x8 ...) => (VPAVGW128 ...) +(AverageUint8x16 ...) => (VPAVGB128 ...) +(AverageUint8x32 ...) => (VPAVGB256 ...) +(AverageUint8x64 ...) => (VPAVGB512 ...) +(DivFloat32x16 ...) => (VDIVPS512 ...) +(DivFloat32x4 ...) => (VDIVPS128 ...) +(DivFloat32x8 ...) => (VDIVPS256 ...) +(DivFloat64x2 ...) => (VDIVPD128 ...) +(DivFloat64x4 ...) => (VDIVPD256 ...) +(DivFloat64x8 ...) => (VDIVPD512 ...) +(EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) +(EqualFloat32x4 x y) => (VCMPPS128 [0] x y) +(EqualFloat32x8 x y) => (VCMPPS256 [0] x y) +(EqualFloat64x2 x y) => (VCMPPD128 [0] x y) +(EqualFloat64x4 x y) => (VCMPPD256 [0] x y) +(EqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [0] x y)) +(EqualInt16x16 ...) => (VPCMPEQW256 ...) +(EqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPEQW512 x y)) +(EqualInt16x8 ...) => (VPCMPEQW128 ...) +(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPEQD512 x y)) +(EqualInt32x4 ...) => (VPCMPEQD128 ...) +(EqualInt32x8 ...) => (VPCMPEQD256 ...) +(EqualInt64x2 ...) => (VPCMPEQQ128 ...) +(EqualInt64x4 ...) => (VPCMPEQQ256 ...) +(EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) +(EqualInt8x16 ...) => (VPCMPEQB128 ...) +(EqualInt8x32 ...) => (VPCMPEQB256 ...) 
+(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPEQB512 x y))
+(EqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [0] x y))
+(EqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [0] x y))
+(EqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [0] x y))
+(EqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [0] x y))
+(EqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [0] x y))
+(EqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [0] x y))
+(EqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [0] x y))
+(EqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [0] x y))
+(EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y))
+(EqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [0] x y))
+(EqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [0] x y))
+(EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [0] x y))
+(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] x y))
+(GreaterFloat32x4 x y) => (VCMPPS128 [6] x y)
+(GreaterFloat32x8 x y) => (VCMPPS256 [6] x y)
+(GreaterFloat64x2 x y) => (VCMPPD128 [6] x y)
+(GreaterFloat64x4 x y) => (VCMPPD256 [6] x y)
+(GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [6] x y))
+(GreaterInt16x16 ...) => (VPCMPGTW256 ...)
+(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPGTW512 x y))
+(GreaterInt16x8 ...) => (VPCMPGTW128 ...)
+(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPGTD512 x y))
+(GreaterInt32x4 ...) => (VPCMPGTD128 ...)
+(GreaterInt32x8 ...) => (VPCMPGTD256 ...)
+(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPGTQ128 x y))
+(GreaterInt64x4 ...) => (VPCMPGTQ256 ...)
+(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPGTQ512 x y))
+(GreaterInt8x16 ...) => (VPCMPGTB128 ...)
+(GreaterInt8x32 ...) => (VPCMPGTB256 ...)
+(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPGTB512 x y))
+(GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [6] x y))
+(GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [6] x y))
+(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [6] x y))
+(GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [6] x y))
+(GreaterUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [6] x y))
+(GreaterUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [6] x y))
+(GreaterUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [6] x y))
+(GreaterUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [6] x y))
+(GreaterUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [6] x y))
+(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [6] x y))
+(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [6] x y))
+(GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [6] x y))
+(GreaterEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [5] x y))
+(GreaterEqualFloat32x4 x y) => (VCMPPS128 [5] x y)
+(GreaterEqualFloat32x8 x y) => (VCMPPS256 [5] x y)
+(GreaterEqualFloat64x2 x y) => (VCMPPD128 [5] x y)
+(GreaterEqualFloat64x4 x y) => (VCMPPD256 [5] x y)
+(GreaterEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [5] x y))
+(GreaterEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [5] x y))
+(GreaterEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [5] x y))
+(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [5] x y))
+(GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [5] x y))
+(GreaterEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [5] x y))
+(GreaterEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [5] x y))
+(GreaterEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [5] x y))
+(GreaterEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [5] x y))
+(GreaterEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [5] x y))
+(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [5] x y))
+(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [5] x y))
+(GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [5] x y))
+(GreaterEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [5] x y))
+(GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [5] x y))
+(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [5] x y))
+(GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [5] x y))
+(GreaterEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [5] x y))
+(GreaterEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [5] x y))
+(GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [5] x y))
+(GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [5] x y))
+(GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [5] x y))
+(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [5] x y))
+(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [5] x y))
+(GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [5] x y))
+(IsNanFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] x y))
+(IsNanFloat32x4 x y) => (VCMPPS128 [3] x y)
+(IsNanFloat32x8 x y) => (VCMPPS256 [3] x y)
+(IsNanFloat64x2 x y) => (VCMPPD128 [3] x y)
+(IsNanFloat64x4 x y) => (VCMPPD256 [3] x y)
+(IsNanFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [3] x y))
+(LessFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [1] x y))
+(LessFloat32x4 x y) => (VCMPPS128 [1] x y)
+(LessFloat32x8 x y) => (VCMPPS256 [1] x y)
+(LessFloat64x2 x y) => (VCMPPD128 [1] x y)
+(LessFloat64x4 x y) => (VCMPPD256 [1] x y)
+(LessFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [1] x y))
+(LessInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [1] x y))
+(LessInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [1] x y))
+(LessInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [1] x y))
+(LessInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [1] x y))
+(LessInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [1] x y))
+(LessInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [1] x y))
+(LessInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [1] x y))
+(LessInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [1] x y))
+(LessInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [1] x y))
+(LessInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [1] x y))
+(LessInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [1] x y))
+(LessInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [1] x y))
+(LessUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [1] x y))
+(LessUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [1] x y))
+(LessUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [1] x y))
+(LessUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [1] x y))
+(LessUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [1] x y))
+(LessUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [1] x y))
+(LessUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [1] x y))
+(LessUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [1] x y))
+(LessUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [1] x y))
+(LessUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [1] x y))
+(LessUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [1] x y))
+(LessUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [1] x y))
+(LessEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [2] x y))
+(LessEqualFloat32x4 x y) => (VCMPPS128 [2] x y)
+(LessEqualFloat32x8 x y) => (VCMPPS256 [2] x y)
+(LessEqualFloat64x2 x y) => (VCMPPD128 [2] x y)
+(LessEqualFloat64x4 x y) => (VCMPPD256 [2] x y)
+(LessEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [2] x y))
+(LessEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [2] x y))
+(LessEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [2] x y))
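+// Expository note (editor's annotation): the AuxInt immediates above follow
+// the x86 compare-predicate encoding shared by VCMPP[SD] and VPCMP[U]{B,W,D,Q}:
+// 0=EQ, 1=LT, 2=LE, 3=UNORD (IsNan), 4=NEQ, 5=NLT (GreaterEqual), 6=NLE (Greater).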
+(LessEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [2] x y))
+(LessEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [2] x y))
+(LessEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [2] x y))
+(LessEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [2] x y))
+(LessEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [2] x y))
+(LessEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [2] x y))
+(LessEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [2] x y))
+(LessEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [2] x y))
+(LessEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [2] x y))
+(LessEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [2] x y))
+(LessEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [2] x y))
+(LessEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [2] x y))
+(LessEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [2] x y))
+(LessEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [2] x y))
+(LessEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [2] x y))
+(LessEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [2] x y))
+(LessEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [2] x y))
+(LessEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [2] x y))
+(LessEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y))
+(LessEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [2] x y))
+(LessEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [2] x y))
+(LessEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [2] x y))
+(MaskedAbsoluteInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask))
+(MaskedAbsoluteInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask))
+(MaskedAbsoluteInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask))
+(MaskedAbsoluteInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask))
+(MaskedAbsoluteInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask))
+(MaskedAbsoluteInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask))
+(MaskedAbsoluteInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask))
+(MaskedAbsoluteInt64x4 x mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask))
+(MaskedAbsoluteInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask))
+(MaskedAbsoluteInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask))
+(MaskedAbsoluteInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask))
+(MaskedAbsoluteInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask))
+(MaskedAddFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedAddFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedAddFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedAddFloat64x2 x y mask) => (VADDPDMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedAddFloat64x4 x y mask) => (VADDPDMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedAddFloat64x8 x y mask) => (VADDPDMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedAddInt16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask))
+(MaskedAddInt16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask))
+(MaskedAddInt16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask))
+(MaskedAddInt32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedAddInt32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedAddInt32x8 x y mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedAddInt64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedAddInt64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedAddInt64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedAddInt8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask))
+(MaskedAddInt8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask))
+(MaskedAddInt8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask))
+(MaskedAddUint16x16 x y mask) => (VPADDWMasked256 x y (VPMOVVec16x16ToM mask))
+(MaskedAddUint16x32 x y mask) => (VPADDWMasked512 x y (VPMOVVec16x32ToM mask))
+(MaskedAddUint16x8 x y mask) => (VPADDWMasked128 x y (VPMOVVec16x8ToM mask))
+(MaskedAddUint32x16 x y mask) => (VPADDDMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedAddUint32x4 x y mask) => (VPADDDMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedAddUint32x8 x y mask) => (VPADDDMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedAddUint64x2 x y mask) => (VPADDQMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedAddUint64x4 x y mask) => (VPADDQMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedAddUint64x8 x y mask) => (VPADDQMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedAddUint8x16 x y mask) => (VPADDBMasked128 x y (VPMOVVec8x16ToM mask))
+(MaskedAddUint8x32 x y mask) => (VPADDBMasked256 x y (VPMOVVec8x32ToM mask))
+(MaskedAddUint8x64 x y mask) => (VPADDBMasked512 x y (VPMOVVec8x64ToM mask))
+(MaskedAndFloat32x16 x y mask) => (VANDPSMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedAndFloat32x4 x y mask) => (VANDPSMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedAndFloat32x8 x y mask) => (VANDPSMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedAndFloat64x2 x y mask) => (VANDPDMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedAndFloat64x4 x y mask) => (VANDPDMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedAndFloat64x8 x y mask) => (VANDPDMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedAndInt32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedAndInt32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedAndInt32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedAndInt64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedAndInt64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedAndInt64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedAndUint32x16 x y mask) => (VPANDDMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedAndUint32x4 x y mask) => (VPANDDMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedAndUint32x8 x y mask) => (VPANDDMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedAndUint64x2 x y mask) => (VPANDQMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedAndUint64x4 x y mask) => (VPANDQMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedAndUint64x8 x y mask) => (VPANDQMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedAndNotFloat32x16 x y mask) => (VANDNPSMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedAndNotFloat32x4 x y mask) => (VANDNPSMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedAndNotFloat32x8 x y mask) => (VANDNPSMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedAndNotFloat64x2 x y mask) => (VANDNPDMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedAndNotFloat64x4 x y mask) => (VANDNPDMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedAndNotFloat64x8 x y mask) => (VANDNPDMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedAndNotInt32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedAndNotInt32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedAndNotInt32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedAndNotInt64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedAndNotInt64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedAndNotInt64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedAndNotUint32x16 x y mask) => (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedAndNotUint32x4 x y mask) => (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedAndNotUint32x8 x y mask) => (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedAndNotUint64x2 x y mask) => (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedAndNotUint64x4 x y mask) => (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedAndNotUint64x8 x y mask) => (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask))
 (MaskedApproximateReciprocalFloat32x16 x mask) => (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask))
-(MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask))
-(MaskedDivFloat32x16 x y mask) => (VDIVPSMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedMaxFloat32x16 x y mask) => (VMAXPSMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedMinFloat32x16 x y mask) => (VMINPSMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedMulFloat32x16 x y mask) => (VMULPSMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedMulByPowOf2Float32x16 x y mask) => (VSCALEFPSMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedOrFloat32x16 x y mask) => (VORPSMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask))
-(MaskedSubFloat32x16 x y mask) => (VADDPSMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedAddFloat32x4 x y mask) => (VADDPSMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedAndFloat32x4 x y mask) => (VANDPSMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedAndNotFloat32x4 x y mask) => (VANDNPSMasked128 y x (VPMOVVec32x4ToM mask))
 (MaskedApproximateReciprocalFloat32x4 x mask) => (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask))
-(MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask))
-(MaskedDivFloat32x4 x y mask) => (VDIVPSMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedMaxFloat32x4 x y mask) => (VMAXPSMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedMinFloat32x4 x y mask) => (VMINPSMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedMulFloat32x4 x y mask) => (VMULPSMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedMulByPowOf2Float32x4 x y mask) => (VSCALEFPSMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedOrFloat32x4 x y mask) => (VORPSMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedSqrtFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask))
-(MaskedSubFloat32x4 x y mask) => (VADDPSMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedXorFloat32x4 x y mask) => (VXORPSMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedAddFloat32x8 x y mask) => (VADDPSMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedAndFloat32x8 x y mask) => (VANDPSMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedAndNotFloat32x8 x y mask) => (VANDNPSMasked256 y x (VPMOVVec32x8ToM mask))
 (MaskedApproximateReciprocalFloat32x8 x mask) => (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask))
-(MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask))
-(MaskedDivFloat32x8 x y mask) => (VDIVPSMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedMaxFloat32x8 x y mask) => (VMAXPSMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedMinFloat32x8 x y mask) => (VMINPSMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedMulFloat32x8 x y mask) => (VMULPSMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedMulByPowOf2Float32x8 x y mask) => (VSCALEFPSMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedOrFloat32x8 x y mask) => (VORPSMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedSqrtFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask))
-(MaskedSubFloat32x8 x y mask) => (VADDPSMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedXorFloat32x8 x y mask) => (VXORPSMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedAddFloat64x2 x y mask) => (VADDPDMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedAndFloat64x2 x y mask) => (VANDPDMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedAndNotFloat64x2 x y mask) => (VANDNPDMasked128 y x (VPMOVVec64x2ToM mask))
 (MaskedApproximateReciprocalFloat64x2 x mask) => (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask))
-(MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask))
-(MaskedDivFloat64x2 x y mask) => (VDIVPDMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedMaxFloat64x2 x y mask) => (VMAXPDMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedMinFloat64x2 x y mask) => (VMINPDMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedMulFloat64x2 x y mask) => (VMULPDMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedMulByPowOf2Float64x2 x y mask) => (VSCALEFPDMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedOrFloat64x2 x y mask) => (VORPDMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedSqrtFloat64x2 x mask) => (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask))
-(MaskedSubFloat64x2 x y mask) => (VADDPDMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedXorFloat64x2 x y mask) => (VXORPDMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedAddFloat64x4 x y mask) => (VADDPDMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedAndFloat64x4 x y mask) => (VANDPDMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedAndNotFloat64x4 x y mask) => (VANDNPDMasked256 y x (VPMOVVec64x4ToM mask))
 (MaskedApproximateReciprocalFloat64x4 x mask) => (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask))
-(MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask))
-(MaskedDivFloat64x4 x y mask) => (VDIVPDMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedMaxFloat64x4 x y mask) => (VMAXPDMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedMinFloat64x4 x y mask) => (VMINPDMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedMulFloat64x4 x y mask) => (VMULPDMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedMulByPowOf2Float64x4 x y mask) => (VSCALEFPDMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedOrFloat64x4 x y mask) => (VORPDMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedSqrtFloat64x4 x mask) => (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask))
-(MaskedSubFloat64x4 x y mask) => (VADDPDMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedXorFloat64x4 x y mask) => (VXORPDMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedAddFloat64x8 x y mask) => (VADDPDMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedAndFloat64x8 x y mask) => (VANDPDMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedAndNotFloat64x8 x y mask) => (VANDNPDMasked512 y x (VPMOVVec64x8ToM mask))
 (MaskedApproximateReciprocalFloat64x8 x mask) => (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask))
+(MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) => (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask))
+(MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) => (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask))
+(MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) => (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask))
+(MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) => (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask))
+(MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) => (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask))
 (MaskedApproximateReciprocalOfSqrtFloat64x8 x mask) => (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask))
-(MaskedDivFloat64x8 x y mask) => (VDIVPDMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedMaxFloat64x8 x y mask) => (VMAXPDMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedMinFloat64x8 x y mask) => (VMINPDMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedMulFloat64x8 x y mask) => (VMULPDMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedMulByPowOf2Float64x8 x y mask) => (VSCALEFPDMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedOrFloat64x8 x y mask) => (VORPDMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedSqrtFloat64x8 x mask) => (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask))
-(MaskedSubFloat64x8 x y mask) => (VADDPDMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedXorFloat64x8 x y mask) => (VXORPDMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedAbsoluteInt16x16 x mask) => (VPABSWMasked256 x (VPMOVVec16x16ToM mask))
-(MaskedAddInt16x16 x y mask) => (VPADDWMasked256 y x (VPMOVVec16x16ToM mask))
-(MaskedMaxInt16x16 x y mask) => (VPMAXSWMasked256 y x (VPMOVVec16x16ToM mask))
-(MaskedMinInt16x16 x y mask) => (VPMINSWMasked256 y x (VPMOVVec16x16ToM mask))
-(MaskedMulHighInt16x16 x y mask) => (VPMULHWMasked256 y x (VPMOVVec16x16ToM mask))
-(MaskedMulLowInt16x16 x y mask) => (VPMULLWMasked256 y x (VPMOVVec16x16ToM mask))
+(MaskedAverageUint16x16 x y mask) => (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask))
+(MaskedAverageUint16x32 x y mask) => (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask))
+(MaskedAverageUint16x8 x y mask) => (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask))
+(MaskedAverageUint8x16 x y mask) => (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask))
+(MaskedAverageUint8x32 x y mask) => (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask))
+(MaskedAverageUint8x64 x y mask) => (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask))
+(MaskedDivFloat32x16 x y mask) => (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedDivFloat32x4 x y mask) => (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedDivFloat32x8 x y mask) => (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedDivFloat64x2 x y mask) => (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedDivFloat64x4 x y mask) => (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedDivFloat64x8 x y mask) => (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask)))
+(MaskedEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask)))
+(MaskedEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask)))
+(MaskedEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask)))
+(MaskedEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask)))
+(MaskedEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask)))
+(MaskedEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPEQWMasked256 x y (VPMOVVec16x16ToM mask)))
+(MaskedEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPEQWMasked512 x y (VPMOVVec16x32ToM mask)))
+(MaskedEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPEQWMasked128 x y (VPMOVVec16x8ToM mask)))
+(MaskedEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPEQDMasked512 x y (VPMOVVec32x16ToM mask)))
+(MaskedEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPEQDMasked128 x y (VPMOVVec32x4ToM mask)))
+(MaskedEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPEQDMasked256 x y (VPMOVVec32x8ToM mask)))
+(MaskedEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPEQQMasked128 x y (VPMOVVec64x2ToM mask)))
+(MaskedEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPEQQMasked256 x y (VPMOVVec64x4ToM mask)))
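+// Expository note (editor's annotation): a masked compare combines both mask
+// conversions, e.g. (MaskedEqualInt32x4 x y mask) converts the vector-shaped
+// mask to a K register (VPMOVVec32x4ToM), performs the compare under that
+// mask so inactive lanes yield false, and converts the resulting K register
+// back to a vector (VPMOVMToVec32x4).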
+(MaskedEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPEQQMasked512 x y (VPMOVVec64x8ToM mask)))
+(MaskedEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPEQBMasked128 x y (VPMOVVec8x16ToM mask)))
+(MaskedEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPEQBMasked256 x y (VPMOVVec8x32ToM mask)))
+(MaskedEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPEQBMasked512 x y (VPMOVVec8x64ToM mask)))
+(MaskedEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask)))
+(MaskedEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask)))
+(MaskedEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask)))
+(MaskedEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask)))
+(MaskedEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] x y (VPMOVVec32x4ToM mask)))
+(MaskedEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] x y (VPMOVVec32x8ToM mask)))
+(MaskedEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask)))
+(MaskedEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask)))
+(MaskedEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask)))
+(MaskedEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask)))
+(MaskedEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask)))
+(MaskedEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask)))
+(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask)))
+(MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask)))
+(MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y (VPMOVVec32x8ToM mask)))
+(MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [6] x y (VPMOVVec64x2ToM mask)))
+(MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [6] x y (VPMOVVec64x4ToM mask)))
+(MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [6] x y (VPMOVVec64x8ToM mask)))
+(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPGTWMasked256 x y (VPMOVVec16x16ToM mask)))
+(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPGTWMasked512 x y (VPMOVVec16x32ToM mask)))
+(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPGTWMasked128 x y (VPMOVVec16x8ToM mask)))
+(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPGTDMasked512 x y (VPMOVVec32x16ToM mask)))
+(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPGTDMasked128 x y (VPMOVVec32x4ToM mask)))
+(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPGTDMasked256 x y (VPMOVVec32x8ToM mask)))
+(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPGTQMasked128 x y (VPMOVVec64x2ToM mask)))
+(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPGTQMasked256 x y (VPMOVVec64x4ToM mask)))
+(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPGTQMasked512 x y (VPMOVVec64x8ToM mask)))
+(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPGTBMasked128 x y (VPMOVVec8x16ToM mask)))
+(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPGTBMasked256 x y (VPMOVVec8x32ToM mask)))
+(MaskedGreaterInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPGTBMasked512 x y (VPMOVVec8x64ToM mask)))
+(MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] x y (VPMOVVec16x16ToM mask)))
+(MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] x y (VPMOVVec16x32ToM mask)))
+(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] x y (VPMOVVec16x8ToM mask)))
+(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] x y (VPMOVVec32x16ToM mask)))
+(MaskedGreaterUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] x y (VPMOVVec32x4ToM mask)))
+(MaskedGreaterUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] x y (VPMOVVec32x8ToM mask)))
+(MaskedGreaterUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] x y (VPMOVVec64x2ToM mask)))
+(MaskedGreaterUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] x y (VPMOVVec64x4ToM mask)))
+(MaskedGreaterUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] x y (VPMOVVec64x8ToM mask)))
+(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] x y (VPMOVVec8x16ToM mask)))
+(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] x y (VPMOVVec8x32ToM mask)))
+(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] x y (VPMOVVec8x64ToM mask)))
+(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [5] x y (VPMOVVec32x16ToM mask)))
+(MaskedGreaterEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [5] x y (VPMOVVec32x4ToM mask)))
+(MaskedGreaterEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [5] x y (VPMOVVec32x8ToM mask)))
+(MaskedGreaterEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [5] x y (VPMOVVec64x2ToM mask)))
+(MaskedGreaterEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [5] x y (VPMOVVec64x4ToM mask)))
+(MaskedGreaterEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [5] x y (VPMOVVec64x8ToM mask)))
+(MaskedGreaterEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [5] x y (VPMOVVec16x16ToM mask)))
+(MaskedGreaterEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [5] x y (VPMOVVec16x32ToM mask)))
+(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [5] x y (VPMOVVec16x8ToM mask)))
+(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [5] x y (VPMOVVec32x16ToM mask)))
+(MaskedGreaterEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [5] x y (VPMOVVec32x4ToM mask)))
+(MaskedGreaterEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [5] x y (VPMOVVec32x8ToM mask)))
+(MaskedGreaterEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [5] x y (VPMOVVec64x2ToM mask)))
+(MaskedGreaterEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [5] x y (VPMOVVec64x4ToM mask)))
+(MaskedGreaterEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [5] x y (VPMOVVec64x8ToM mask)))
+(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [5] x y (VPMOVVec8x16ToM mask)))
+(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [5] x y (VPMOVVec8x32ToM mask)))
+(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [5] x y (VPMOVVec8x64ToM mask)))
+(MaskedGreaterEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] x y (VPMOVVec16x16ToM mask)))
+(MaskedGreaterEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] x y (VPMOVVec16x32ToM mask)))
+(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] x y (VPMOVVec16x8ToM mask)))
+(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] x y (VPMOVVec32x16ToM mask)))
+(MaskedGreaterEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] x y (VPMOVVec32x4ToM mask)))
+(MaskedGreaterEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] x y (VPMOVVec32x8ToM mask)))
+(MaskedGreaterEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] x y (VPMOVVec64x2ToM mask)))
+(MaskedGreaterEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] x y (VPMOVVec64x4ToM mask)))
+(MaskedGreaterEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] x y (VPMOVVec64x8ToM mask)))
+(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] x y (VPMOVVec8x16ToM mask)))
+(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] x y (VPMOVVec8x32ToM mask)))
+(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] x y (VPMOVVec8x64ToM mask)))
+(MaskedIsNanFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask)))
+(MaskedIsNanFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask)))
+(MaskedIsNanFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask)))
+(MaskedIsNanFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [3] x y (VPMOVVec64x2ToM mask)))
+(MaskedIsNanFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [3] x y (VPMOVVec64x4ToM mask)))
+(MaskedIsNanFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [3] x y (VPMOVVec64x8ToM mask)))
+(MaskedLessFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask)))
+(MaskedLessFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [1] x y (VPMOVVec32x4ToM mask)))
+(MaskedLessFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [1] x y (VPMOVVec32x8ToM mask)))
+(MaskedLessFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [1] x y (VPMOVVec64x2ToM mask)))
+(MaskedLessFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [1] x y (VPMOVVec64x4ToM mask)))
+(MaskedLessFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [1] x y (VPMOVVec64x8ToM mask)))
+(MaskedLessInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [1] x y (VPMOVVec16x16ToM mask)))
+(MaskedLessInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [1] x y (VPMOVVec16x32ToM mask)))
+(MaskedLessInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask)))
+(MaskedLessInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask)))
+(MaskedLessInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [1] x y (VPMOVVec32x4ToM mask)))
+(MaskedLessInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [1] x y (VPMOVVec32x8ToM mask)))
+(MaskedLessInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [1] x y (VPMOVVec64x2ToM mask)))
+(MaskedLessInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [1] x y (VPMOVVec64x4ToM mask)))
+(MaskedLessInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [1] x y (VPMOVVec64x8ToM mask)))
+(MaskedLessInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask)))
+(MaskedLessInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask)))
+(MaskedLessInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask)))
+(MaskedLessUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] x y (VPMOVVec16x16ToM mask)))
+(MaskedLessUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] x y (VPMOVVec16x32ToM mask)))
+(MaskedLessUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask)))
+(MaskedLessUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask)))
+(MaskedLessUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] x y (VPMOVVec32x4ToM mask)))
+(MaskedLessUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] x y (VPMOVVec32x8ToM mask)))
+(MaskedLessUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] x y (VPMOVVec64x2ToM mask)))
+(MaskedLessUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] x y (VPMOVVec64x4ToM mask)))
+(MaskedLessUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] x y (VPMOVVec64x8ToM mask)))
+(MaskedLessUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask)))
+(MaskedLessUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask)))
+(MaskedLessUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask)))
+(MaskedLessEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask)))
+(MaskedLessEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask)))
+(MaskedLessEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask)))
+(MaskedLessEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [2] x y (VPMOVVec64x2ToM mask)))
+(MaskedLessEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [2] x y (VPMOVVec64x4ToM mask)))
+(MaskedLessEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [2] x y (VPMOVVec64x8ToM mask)))
+(MaskedLessEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [2] x y (VPMOVVec16x16ToM mask)))
+(MaskedLessEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [2] x y (VPMOVVec16x32ToM mask)))
+(MaskedLessEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask)))
+(MaskedLessEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask)))
+(MaskedLessEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [2] x y (VPMOVVec32x4ToM mask)))
+(MaskedLessEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [2] x y (VPMOVVec32x8ToM mask)))
+(MaskedLessEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [2] x y (VPMOVVec64x2ToM mask)))
+(MaskedLessEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [2] x y (VPMOVVec64x4ToM mask)))
+(MaskedLessEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [2] x y (VPMOVVec64x8ToM mask)))
+(MaskedLessEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask)))
+(MaskedLessEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask)))
+(MaskedLessEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask)))
+(MaskedLessEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] x y (VPMOVVec16x16ToM mask)))
+(MaskedLessEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] x y (VPMOVVec16x32ToM mask)))
+(MaskedLessEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask)))
+(MaskedLessEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask)))
+(MaskedLessEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] x y (VPMOVVec32x4ToM mask)))
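+// Expository note (editor's annotation): unsigned comparisons have no
+// SSE/AVX2 encoding (PCMPGT is signed-only), so even the 128- and 256-bit
+// unsigned forms use the AVX-512 VPCMPU* instructions and pay the
+// K-register round trip.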
+(MaskedLessEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] x y (VPMOVVec32x8ToM mask)))
+(MaskedLessEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] x y (VPMOVVec64x2ToM mask)))
+(MaskedLessEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] x y (VPMOVVec64x4ToM mask)))
+(MaskedLessEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] x y (VPMOVVec64x8ToM mask)))
+(MaskedLessEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask)))
+(MaskedLessEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask)))
+(MaskedLessEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask)))
+(MaskedMaxFloat32x16 x y mask) => (VMAXPSMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedMaxFloat32x4 x y mask) => (VMAXPSMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedMaxFloat32x8 x y mask) => (VMAXPSMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedMaxFloat64x2 x y mask) => (VMAXPDMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedMaxFloat64x4 x y mask) => (VMAXPDMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedMaxFloat64x8 x y mask) => (VMAXPDMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedMaxInt16x16 x y mask) => (VPMAXSWMasked256 x y (VPMOVVec16x16ToM mask))
+(MaskedMaxInt16x32 x y mask) => (VPMAXSWMasked512 x y (VPMOVVec16x32ToM mask))
+(MaskedMaxInt16x8 x y mask) => (VPMAXSWMasked128 x y (VPMOVVec16x8ToM mask))
+(MaskedMaxInt32x16 x y mask) => (VPMAXSDMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedMaxInt32x4 x y mask) => (VPMAXSDMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedMaxInt32x8 x y mask) => (VPMAXSDMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedMaxInt64x2 x y mask) => (VPMAXSQMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedMaxInt64x4 x y mask) => (VPMAXSQMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedMaxInt64x8 x y mask) => (VPMAXSQMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedMaxInt8x16 x y mask) => (VPMAXSBMasked128 x y (VPMOVVec8x16ToM mask))
+(MaskedMaxInt8x32 x y mask) => (VPMAXSBMasked256 x y (VPMOVVec8x32ToM mask))
+(MaskedMaxInt8x64 x y mask) => (VPMAXSBMasked512 x y (VPMOVVec8x64ToM mask))
+(MaskedMaxUint16x16 x y mask) => (VPMAXUWMasked256 x y (VPMOVVec16x16ToM mask))
+(MaskedMaxUint16x32 x y mask) => (VPMAXUWMasked512 x y (VPMOVVec16x32ToM mask))
+(MaskedMaxUint16x8 x y mask) => (VPMAXUWMasked128 x y (VPMOVVec16x8ToM mask))
+(MaskedMaxUint32x16 x y mask) => (VPMAXUDMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedMaxUint32x4 x y mask) => (VPMAXUDMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedMaxUint32x8 x y mask) => (VPMAXUDMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedMaxUint64x2 x y mask) => (VPMAXUQMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedMaxUint64x4 x y mask) => (VPMAXUQMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedMaxUint64x8 x y mask) => (VPMAXUQMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedMaxUint8x16 x y mask) => (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask))
+(MaskedMaxUint8x32 x y mask) => (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask))
+(MaskedMaxUint8x64 x y mask) => (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask))
+(MaskedMinFloat32x16 x y mask) => (VMINPSMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedMinFloat32x4 x y mask) => (VMINPSMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedMinFloat32x8 x y mask) => (VMINPSMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedMinFloat64x2 x y mask) => (VMINPDMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedMinFloat64x4 x y mask) => (VMINPDMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedMinFloat64x8 x y mask) => (VMINPDMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedMinInt16x16 x y mask) => (VPMINSWMasked256 x y (VPMOVVec16x16ToM mask))
+(MaskedMinInt16x32 x y mask) => (VPMINSWMasked512 x y (VPMOVVec16x32ToM mask))
+(MaskedMinInt16x8 x y mask) => (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask))
+(MaskedMinInt32x16 x y mask) => (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedMinInt32x4 x y mask) => (VPMINSDMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedMinInt32x8 x y mask) => (VPMINSDMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedMinInt64x2 x y mask) => (VPMINSQMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedMinInt64x4 x y mask) => (VPMINSQMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedMinInt64x8 x y mask) => (VPMINSQMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedMinInt8x16 x y mask) => (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask))
+(MaskedMinInt8x32 x y mask) => (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask))
+(MaskedMinInt8x64 x y mask) => (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask))
+(MaskedMinUint16x16 x y mask) => (VPMINUWMasked256 x y (VPMOVVec16x16ToM mask))
+(MaskedMinUint16x32 x y mask) => (VPMINUWMasked512 x y (VPMOVVec16x32ToM mask))
+(MaskedMinUint16x8 x y mask) => (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask))
+(MaskedMinUint32x16 x y mask) => (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedMinUint32x4 x y mask) => (VPMINUDMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedMinUint32x8 x y mask) => (VPMINUDMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedMinUint64x2 x y mask) => (VPMINUQMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedMinUint64x4 x y mask) => (VPMINUQMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedMinUint64x8 x y mask) => (VPMINUQMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedMinUint8x16 x y mask) => (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask))
+(MaskedMinUint8x32 x y mask) => (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask))
+(MaskedMinUint8x64 x y mask) => (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask))
+(MaskedMulFloat32x16 x y mask) => (VMULPSMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedMulFloat32x4 x y mask) => (VMULPSMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedMulFloat32x8 x y mask) => (VMULPSMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedMulFloat64x2 x y mask) => (VMULPDMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedMulFloat64x4 x y mask) => (VMULPDMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedMulFloat64x8 x y mask) => (VMULPDMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedMulByPowOf2Float32x16 x y mask) => (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedMulByPowOf2Float32x4 x y mask) => (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedMulByPowOf2Float32x8 x y mask) => (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedMulByPowOf2Float64x2 x y mask) => (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedMulByPowOf2Float64x4 x y mask) => (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedMulByPowOf2Float64x8 x y mask) => (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedMulEvenWidenInt64x2 x y mask) => (VPMULDQMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedMulEvenWidenInt64x4 x y mask) => (VPMULDQMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedMulEvenWidenInt64x8 x y mask) => (VPMULDQMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedMulEvenWidenUint64x2 x y mask) => (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedMulEvenWidenUint64x4 x y mask) => (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedMulEvenWidenUint64x8 x y mask) => (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedMulHighInt16x16 x y mask) => (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask))
+(MaskedMulHighInt16x32 x y mask) => (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask))
+(MaskedMulHighInt16x8 x y mask) => (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask))
+(MaskedMulHighUint16x16 x y mask) => (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask))
+(MaskedMulHighUint16x32 x y mask) => (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask))
+(MaskedMulHighUint16x8 x y mask) => (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask))
+(MaskedMulLowInt16x16 x y mask) => (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask))
+(MaskedMulLowInt16x32 x y mask) => (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask))
+(MaskedMulLowInt16x8 x y mask) => (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask))
+(MaskedMulLowInt32x16 x y mask) => (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedMulLowInt32x4 x y mask) => (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedMulLowInt32x8 x y mask) => (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedMulLowInt64x2 x y mask) => (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedMulLowInt64x4 x y mask) => (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedMulLowInt64x8 x y mask) => (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedNotEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask)))
+(MaskedNotEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask)))
+(MaskedNotEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask)))
+(MaskedNotEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [4] x y (VPMOVVec64x2ToM mask)))
+(MaskedNotEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [4] x y (VPMOVVec64x4ToM mask)))
+(MaskedNotEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [4] x y (VPMOVVec64x8ToM mask)))
+(MaskedNotEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [4] x y (VPMOVVec16x16ToM mask)))
+(MaskedNotEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [4] x y (VPMOVVec16x32ToM mask)))
+(MaskedNotEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask)))
+(MaskedNotEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask)))
+(MaskedNotEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [4] x y (VPMOVVec32x4ToM mask)))
+(MaskedNotEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [4] x y (VPMOVVec32x8ToM mask)))
+(MaskedNotEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [4] x y (VPMOVVec64x2ToM mask)))
+(MaskedNotEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [4] x y (VPMOVVec64x4ToM mask)))
+(MaskedNotEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [4] x y (VPMOVVec64x8ToM mask)))
+(MaskedNotEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask)))
+(MaskedNotEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask)))
+(MaskedNotEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask)))
+(MaskedNotEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] x y (VPMOVVec16x16ToM mask)))
+(MaskedNotEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] x y (VPMOVVec16x32ToM mask)))
+(MaskedNotEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask)))
+(MaskedNotEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask)))
+(MaskedNotEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] x y (VPMOVVec32x4ToM mask)))
+(MaskedNotEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] x y (VPMOVVec32x8ToM mask)))
+(MaskedNotEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask)))
+(MaskedNotEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask)))
+(MaskedNotEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask)))
+(MaskedNotEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask)))
+(MaskedNotEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask)))
+(MaskedNotEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask)))
+(MaskedOrFloat32x16 x y mask) => (VORPSMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedOrFloat32x4 x y mask) => (VORPSMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedOrFloat32x8 x y mask) => (VORPSMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedOrFloat64x2 x y mask) => (VORPDMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedOrFloat64x4 x y mask) => (VORPDMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedOrFloat64x8 x y mask) => (VORPDMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedOrInt32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedOrInt32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedOrInt32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedOrInt64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedOrInt64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedOrInt64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask))
+(MaskedOrUint32x16 x y mask) => (VPORDMasked512 x y (VPMOVVec32x16ToM mask))
+(MaskedOrUint32x4 x y mask) => (VPORDMasked128 x y (VPMOVVec32x4ToM mask))
+(MaskedOrUint32x8 x y mask) => (VPORDMasked256 x y (VPMOVVec32x8ToM mask))
+(MaskedOrUint64x2 x y mask) => (VPORQMasked128 x y (VPMOVVec64x2ToM mask))
+(MaskedOrUint64x4 x y mask) => (VPORQMasked256 x y (VPMOVVec64x4ToM mask))
+(MaskedOrUint64x8 x y mask) => (VPORQMasked512 x y (VPMOVVec64x8ToM mask))
 (MaskedPopCountInt16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask))
-(MaskedSaturatedAddInt16x16 x y mask) => (VPADDSWMasked256 y x (VPMOVVec16x16ToM mask))
-(MaskedSaturatedSubInt16x16 x y mask) => (VPSUBSWMasked256 y x (VPMOVVec16x16ToM mask))
-(MaskedSubInt16x16 x y mask) => (VPSUBWMasked256 y x (VPMOVVec16x16ToM mask))
-(MaskedAbsoluteInt16x32 x mask) => (VPABSWMasked512 x (VPMOVVec16x32ToM mask))
-(MaskedAddInt16x32 x y mask) => (VPADDWMasked512 y x (VPMOVVec16x32ToM mask))
-(MaskedMaxInt16x32 x y mask) => (VPMAXSWMasked512 y x (VPMOVVec16x32ToM mask))
-(MaskedMinInt16x32 x y mask) => (VPMINSWMasked512 y x (VPMOVVec16x32ToM mask))
-(MaskedMulHighInt16x32 x y mask) => (VPMULHWMasked512 y x (VPMOVVec16x32ToM mask))
-(MaskedMulLowInt16x32 x y mask) => (VPMULLWMasked512 y x (VPMOVVec16x32ToM mask))
 (MaskedPopCountInt16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask))
-(MaskedSaturatedAddInt16x32 x y mask) => (VPADDSWMasked512 y x (VPMOVVec16x32ToM mask))
-(MaskedSaturatedSubInt16x32 x y mask) => (VPSUBSWMasked512 y x (VPMOVVec16x32ToM mask))
-(MaskedSubInt16x32 x y mask) => (VPSUBWMasked512 y x (VPMOVVec16x32ToM mask))
-(MaskedAbsoluteInt16x8 x mask) => (VPABSWMasked128 x (VPMOVVec16x8ToM mask))
-(MaskedAddInt16x8 x y mask) => (VPADDWMasked128 y x (VPMOVVec16x8ToM mask))
-(MaskedMaxInt16x8 x y mask) => (VPMAXSWMasked128 y x (VPMOVVec16x8ToM mask))
-(MaskedMinInt16x8 x y mask) => (VPMINSWMasked128 y x (VPMOVVec16x8ToM mask))
-(MaskedMulHighInt16x8 x y mask) => (VPMULHWMasked128 y x (VPMOVVec16x8ToM mask))
-(MaskedMulLowInt16x8 x y mask) => (VPMULLWMasked128 y x (VPMOVVec16x8ToM mask))
 (MaskedPopCountInt16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask))
-(MaskedSaturatedAddInt16x8 x y mask) => (VPADDSWMasked128 y x (VPMOVVec16x8ToM mask))
-(MaskedSaturatedSubInt16x8 x y mask) => (VPSUBSWMasked128 y x (VPMOVVec16x8ToM mask))
-(MaskedSubInt16x8 x y mask) => (VPSUBWMasked128 y x (VPMOVVec16x8ToM mask))
-(MaskedAbsoluteInt32x16 x mask) => (VPABSDMasked512 x (VPMOVVec32x16ToM mask))
-(MaskedAddInt32x16 x y mask) => (VPADDDMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedAndInt32x16 x y mask) => (VPANDDMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedAndNotInt32x16 x y mask) => (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedMaxInt32x16 x y mask) => (VPMAXSDMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedMinInt32x16 x y mask) => (VPMINSDMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedMulLowInt32x16 x y mask) => (VPMULLDMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedOrInt32x16 x y mask) => (VPORDMasked512 y x (VPMOVVec32x16ToM mask))
 (MaskedPopCountInt32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask))
-(MaskedSubInt32x16 x y mask) => (VPSUBDMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedXorInt32x16 x y mask) => (VPXORDMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedAbsoluteInt32x4 x mask) => (VPABSDMasked128 x (VPMOVVec32x4ToM mask))
-(MaskedAddInt32x4 x y mask) => (VPADDDMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedAndInt32x4 x y mask) => (VPANDDMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedAndNotInt32x4 x y mask) => (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedMaxInt32x4 x y mask) => (VPMAXSDMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedMinInt32x4 x y mask) => (VPMINSDMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedMulLowInt32x4 x y mask) => (VPMULLDMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedOrInt32x4 x y mask) => (VPORDMasked128 y x (VPMOVVec32x4ToM mask))
 (MaskedPopCountInt32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask))
-(MaskedSubInt32x4 x y mask) => (VPSUBDMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedXorInt32x4 x y mask) => (VPXORDMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedAbsoluteInt32x8 x mask) => (VPABSDMasked256 x (VPMOVVec32x8ToM mask))
-(MaskedAddInt32x8 x y mask) => (VPADDDMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedAndInt32x8 x y mask) => (VPANDDMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedAndNotInt32x8 x y mask) => (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedMaxInt32x8 x y mask) => (VPMAXSDMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedMinInt32x8 x y mask) => (VPMINSDMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedMulLowInt32x8 x y mask) => (VPMULLDMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedOrInt32x8 x y mask) => (VPORDMasked256 y x (VPMOVVec32x8ToM mask))
 (MaskedPopCountInt32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask))
-(MaskedSubInt32x8 x y mask) => (VPSUBDMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedXorInt32x8 x y mask) => (VPXORDMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedAbsoluteInt64x2 x mask) => (VPABSQMasked128 x (VPMOVVec64x2ToM mask))
-(MaskedAddInt64x2 x y mask) => (VPADDQMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedAndInt64x2 x y mask) => (VPANDQMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedAndNotInt64x2 x y mask) => (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedMaxInt64x2 x y mask) => (VPMAXSQMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedMinInt64x2 x y mask) => (VPMINSQMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedMulEvenWidenInt64x2 x y mask) => (VPMULDQMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedMulLowInt64x2 x y mask) => (VPMULLQMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedOrInt64x2 x y mask) => (VPORQMasked128 y x (VPMOVVec64x2ToM mask))
 (MaskedPopCountInt64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask))
-(MaskedSubInt64x2 x y mask) => (VPSUBQMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedXorInt64x2 x y mask) => (VPXORQMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedAbsoluteInt64x4 x mask) => (VPABSQMasked256 x (VPMOVVec64x4ToM mask))
-(MaskedAddInt64x4 x y mask) => (VPADDQMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedAndInt64x4 x y mask) => (VPANDQMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedAndNotInt64x4 x y mask) => (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedMaxInt64x4 x y mask) => (VPMAXSQMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedMinInt64x4 x y mask) => (VPMINSQMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedMulEvenWidenInt64x4 x y mask) => (VPMULDQMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedMulLowInt64x4 x y mask) => (VPMULLQMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedOrInt64x4 x y mask) => (VPORQMasked256 y x (VPMOVVec64x4ToM mask))
 (MaskedPopCountInt64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask))
-(MaskedSubInt64x4 x y mask) => (VPSUBQMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedXorInt64x4 x y mask) => (VPXORQMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedAbsoluteInt64x8 x mask) => (VPABSQMasked512 x (VPMOVVec64x8ToM mask))
-(MaskedAddInt64x8 x y mask) => (VPADDQMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedAndInt64x8 x y mask) => (VPANDQMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedAndNotInt64x8 x y mask) => (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedMaxInt64x8 x y mask) => (VPMAXSQMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedMinInt64x8 x y mask) => (VPMINSQMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedMulEvenWidenInt64x8 x y mask) => (VPMULDQMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedMulLowInt64x8 x y mask) => (VPMULLQMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedOrInt64x8 x y mask) => (VPORQMasked512 y x (VPMOVVec64x8ToM mask))
 (MaskedPopCountInt64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask))
-(MaskedSubInt64x8 x y mask) => (VPSUBQMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedXorInt64x8 x y mask) => (VPXORQMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedAbsoluteInt8x16 x mask) => (VPABSBMasked128 x (VPMOVVec8x16ToM mask))
-(MaskedAddInt8x16 x y mask) => (VPADDBMasked128 y x (VPMOVVec8x16ToM mask))
-(MaskedMaxInt8x16 x y mask) => (VPMAXSBMasked128 y x (VPMOVVec8x16ToM mask))
-(MaskedMinInt8x16 x y mask) => (VPMINSBMasked128 y x (VPMOVVec8x16ToM mask))
 (MaskedPopCountInt8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask))
-(MaskedSaturatedAddInt8x16 x y mask) => (VPADDSBMasked128 y x (VPMOVVec8x16ToM mask))
-(MaskedSaturatedSubInt8x16 x y mask) => (VPSUBSBMasked128 y x (VPMOVVec8x16ToM mask))
-(MaskedSubInt8x16 x y mask) => (VPSUBBMasked128 y x (VPMOVVec8x16ToM mask))
-(MaskedAbsoluteInt8x32 x mask) => (VPABSBMasked256 x (VPMOVVec8x32ToM mask))
-(MaskedAddInt8x32 x y mask) => (VPADDBMasked256 y x (VPMOVVec8x32ToM mask))
-(MaskedMaxInt8x32 x y mask) => (VPMAXSBMasked256 y x (VPMOVVec8x32ToM mask))
-(MaskedMinInt8x32 x y mask) => (VPMINSBMasked256 y x (VPMOVVec8x32ToM mask))
 (MaskedPopCountInt8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask))
-(MaskedSaturatedAddInt8x32 x y mask) => (VPADDSBMasked256 y x (VPMOVVec8x32ToM mask))
-(MaskedSaturatedSubInt8x32 x y mask) => (VPSUBSBMasked256 y x (VPMOVVec8x32ToM mask))
-(MaskedSubInt8x32 x y mask) => (VPSUBBMasked256 y x (VPMOVVec8x32ToM mask))
-(MaskedAbsoluteInt8x64 x mask) => (VPABSBMasked512 x (VPMOVVec8x64ToM mask))
-(MaskedAddInt8x64 x y mask) => (VPADDBMasked512 y x (VPMOVVec8x64ToM mask))
-(MaskedMaxInt8x64 x y mask) => (VPMAXSBMasked512 y x (VPMOVVec8x64ToM mask))
-(MaskedMinInt8x64 x y mask) => (VPMINSBMasked512 y x (VPMOVVec8x64ToM mask))
 (MaskedPopCountInt8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask))
-(MaskedSaturatedAddInt8x64 x y mask) => (VPADDSBMasked512 y x (VPMOVVec8x64ToM mask))
-(MaskedSaturatedSubInt8x64 x y mask) => (VPSUBSBMasked512 y x (VPMOVVec8x64ToM mask))
-(MaskedSubInt8x64 x y mask) => (VPSUBBMasked512 y x (VPMOVVec8x64ToM mask))
-(MaskedAddUint16x16 x y mask) => (VPADDWMasked256 y x (VPMOVVec16x16ToM mask))
-(MaskedAverageUint16x16 x y mask) => (VPAVGWMasked256 y x (VPMOVVec16x16ToM mask))
-(MaskedMaxUint16x16 x y mask) => (VPMAXUWMasked256 y x (VPMOVVec16x16ToM mask))
-(MaskedMinUint16x16 x y mask) => (VPMINUWMasked256 y x (VPMOVVec16x16ToM mask))
-(MaskedMulHighUint16x16 x y mask) => (VPMULHUWMasked256 y x (VPMOVVec16x16ToM mask))
 (MaskedPopCountUint16x16 x mask) => (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask))
-(MaskedSaturatedAddUint16x16 x y mask) => (VPADDSWMasked256 y x (VPMOVVec16x16ToM mask))
-(MaskedSaturatedSubUint16x16 x y mask) => (VPSUBSWMasked256 y x (VPMOVVec16x16ToM mask))
-(MaskedSubUint16x16 x y mask) => (VPSUBWMasked256 y x (VPMOVVec16x16ToM mask))
-(MaskedAddUint16x32 x y mask) => (VPADDWMasked512 y x (VPMOVVec16x32ToM mask))
-(MaskedAverageUint16x32 x y mask) => (VPAVGWMasked512 y x (VPMOVVec16x32ToM mask))
-(MaskedMaxUint16x32 x y mask) => (VPMAXUWMasked512 y x (VPMOVVec16x32ToM mask))
-(MaskedMinUint16x32 x y mask) => (VPMINUWMasked512 y x (VPMOVVec16x32ToM mask))
-(MaskedMulHighUint16x32 x y mask) => (VPMULHUWMasked512 y x (VPMOVVec16x32ToM mask))
 (MaskedPopCountUint16x32 x mask) => (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask))
-(MaskedSaturatedAddUint16x32 x y mask) => (VPADDSWMasked512 y x (VPMOVVec16x32ToM mask))
-(MaskedSaturatedSubUint16x32 x y mask) => (VPSUBSWMasked512 y x (VPMOVVec16x32ToM mask))
-(MaskedSubUint16x32 x y mask) => (VPSUBWMasked512 y x (VPMOVVec16x32ToM mask))
-(MaskedAddUint16x8 x y mask) => (VPADDWMasked128 y x (VPMOVVec16x8ToM mask))
-(MaskedAverageUint16x8 x y mask) => (VPAVGWMasked128 y x (VPMOVVec16x8ToM mask))
-(MaskedMaxUint16x8 x y mask) => (VPMAXUWMasked128 y x (VPMOVVec16x8ToM mask))
-(MaskedMinUint16x8 x y mask) => (VPMINUWMasked128 y x (VPMOVVec16x8ToM mask))
-(MaskedMulHighUint16x8 x y mask) => (VPMULHUWMasked128 y x (VPMOVVec16x8ToM mask))
 (MaskedPopCountUint16x8 x mask) => (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask))
-(MaskedSaturatedAddUint16x8 x y mask) => (VPADDSWMasked128 y x (VPMOVVec16x8ToM mask))
-(MaskedSaturatedSubUint16x8 x y mask) => (VPSUBSWMasked128 y x (VPMOVVec16x8ToM mask))
-(MaskedSubUint16x8 x y mask) => (VPSUBWMasked128 y x (VPMOVVec16x8ToM mask))
-(MaskedAddUint32x16 x y mask) => (VPADDDMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedAndUint32x16 x y mask) => (VPANDDMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedAndNotUint32x16 x y mask) => (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedMaxUint32x16 x y mask) => (VPMAXUDMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedMinUint32x16 x y mask) => (VPMINUDMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedOrUint32x16 x y mask) => (VPORDMasked512 y x (VPMOVVec32x16ToM mask))
 (MaskedPopCountUint32x16 x mask) => (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask))
-(MaskedSubUint32x16 x y mask) => (VPSUBDMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedXorUint32x16 x y mask) => (VPXORDMasked512 y x (VPMOVVec32x16ToM mask))
-(MaskedAddUint32x4 x y mask) => (VPADDDMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedAndUint32x4 x y mask) => (VPANDDMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedAndNotUint32x4 x y mask) => (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedMaxUint32x4 x y mask) => (VPMAXUDMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedMinUint32x4 x y mask) => (VPMINUDMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedOrUint32x4 x y mask) => (VPORDMasked128 y x (VPMOVVec32x4ToM mask))
 (MaskedPopCountUint32x4 x mask) => (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask))
-(MaskedSubUint32x4 x y mask) => (VPSUBDMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedXorUint32x4 x y mask) => (VPXORDMasked128 y x (VPMOVVec32x4ToM mask))
-(MaskedAddUint32x8 x y mask) => (VPADDDMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedAndUint32x8 x y mask) => (VPANDDMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedAndNotUint32x8 x y mask) => (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedMaxUint32x8 x y mask) => (VPMAXUDMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedMinUint32x8 x y mask) => (VPMINUDMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedOrUint32x8 x y mask) => (VPORDMasked256 y x (VPMOVVec32x8ToM mask))
 (MaskedPopCountUint32x8 x mask) => (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask))
-(MaskedSubUint32x8 x y mask) => (VPSUBDMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedXorUint32x8 x y mask) => (VPXORDMasked256 y x (VPMOVVec32x8ToM mask))
-(MaskedAddUint64x2 x y mask) => (VPADDQMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedAndUint64x2 x y mask) => (VPANDQMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedAndNotUint64x2 x y mask) => (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedMaxUint64x2 x y mask) => (VPMAXUQMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedMinUint64x2 x y mask) => (VPMINUQMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedMulEvenWidenUint64x2 x y mask) => (VPMULUDQMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedOrUint64x2 x y mask) => (VPORQMasked128 y x (VPMOVVec64x2ToM mask))
 (MaskedPopCountUint64x2 x mask) => (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask))
-(MaskedSubUint64x2 x y mask) => (VPSUBQMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedXorUint64x2 x y mask) => (VPXORQMasked128 y x (VPMOVVec64x2ToM mask))
-(MaskedAddUint64x4 x y mask) => (VPADDQMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedAndUint64x4 x y mask) => (VPANDQMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedAndNotUint64x4 x y mask) => (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedMaxUint64x4 x y mask) => (VPMAXUQMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedMinUint64x4 x y mask) => (VPMINUQMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedMulEvenWidenUint64x4 x y mask) => (VPMULUDQMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedOrUint64x4 x y mask) => (VPORQMasked256 y x (VPMOVVec64x4ToM mask))
 (MaskedPopCountUint64x4 x mask) => (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask))
-(MaskedSubUint64x4 x y mask) => (VPSUBQMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedXorUint64x4 x y mask) => (VPXORQMasked256 y x (VPMOVVec64x4ToM mask))
-(MaskedAddUint64x8 x y mask) => (VPADDQMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedAndUint64x8 x y mask) => (VPANDQMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedAndNotUint64x8 x y mask) => (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedMaxUint64x8 x y mask) => (VPMAXUQMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedMinUint64x8 x y mask) => (VPMINUQMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedMulEvenWidenUint64x8 x y mask) => (VPMULUDQMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedOrUint64x8 x y mask) => (VPORQMasked512 y x (VPMOVVec64x8ToM mask))
 (MaskedPopCountUint64x8 x mask) => (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask))
-(MaskedSubUint64x8 x y mask) => (VPSUBQMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedXorUint64x8 x y mask) => (VPXORQMasked512 y x (VPMOVVec64x8ToM mask))
-(MaskedAddUint8x16 x y mask) => (VPADDBMasked128 y x (VPMOVVec8x16ToM mask))
-(MaskedAverageUint8x16 x y mask) => (VPAVGBMasked128 y x (VPMOVVec8x16ToM mask))
-(MaskedMaxUint8x16 x y mask) => (VPMAXUBMasked128 y x (VPMOVVec8x16ToM mask))
-(MaskedMinUint8x16 x y mask) => (VPMINUBMasked128 y x (VPMOVVec8x16ToM mask))
 (MaskedPopCountUint8x16 x mask) => (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask))
-(MaskedSaturatedAddUint8x16 x y mask) => (VPADDSBMasked128 y x (VPMOVVec8x16ToM mask))
-(MaskedSaturatedSubUint8x16 x y mask) => (VPSUBSBMasked128 y x (VPMOVVec8x16ToM mask))
-(MaskedSubUint8x16 x y mask) => (VPSUBBMasked128 y x (VPMOVVec8x16ToM mask))
-(MaskedAddUint8x32 x y mask) => (VPADDBMasked256 y x (VPMOVVec8x32ToM mask))
-(MaskedAverageUint8x32 x y mask) => (VPAVGBMasked256 y x (VPMOVVec8x32ToM mask))
-(MaskedMaxUint8x32 x y mask) => (VPMAXUBMasked256 y x (VPMOVVec8x32ToM mask))
-(MaskedMinUint8x32 x y mask) => (VPMINUBMasked256 y x (VPMOVVec8x32ToM mask))
 (MaskedPopCountUint8x32 x mask) => (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask))
-(MaskedSaturatedAddUint8x32 x y mask) => (VPADDSBMasked256 y x (VPMOVVec8x32ToM mask))
-(MaskedSaturatedSubUint8x32 x y mask) => (VPSUBSBMasked256 y x (VPMOVVec8x32ToM mask))
-(MaskedSubUint8x32 x y mask) => (VPSUBBMasked256 y x (VPMOVVec8x32ToM mask))
-(MaskedAddUint8x64 x y mask) => (VPADDBMasked512 y x (VPMOVVec8x64ToM mask))
-(MaskedAverageUint8x64 x y mask) => (VPAVGBMasked512 y x (VPMOVVec8x64ToM mask))
-(MaskedMaxUint8x64 x y mask) => (VPMAXUBMasked512 y x (VPMOVVec8x64ToM mask))
-(MaskedMinUint8x64 x y mask) => (VPMINUBMasked512 y x (VPMOVVec8x64ToM mask))
 (MaskedPopCountUint8x64 x mask) => (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask))
-(MaskedSaturatedAddUint8x64 x y mask) => (VPADDSBMasked512 y x (VPMOVVec8x64ToM mask))
-(MaskedSaturatedSubUint8x64 x y mask) => (VPSUBSBMasked512 y x (VPMOVVec8x64ToM mask))
-(MaskedSubUint8x64 x y mask) => (VPSUBBMasked512 y x (VPMOVVec8x64ToM mask))
-(EqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPEQW512 y x))
-(GreaterInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPGTW512 y x))
-(GreaterInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPGTQ128 y x))
-(EqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPEQQ512 y x))
-(GreaterInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPGTQ512 y x))
-(EqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [0] y x))
-(EqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [0] y x))
-(EqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [0] y x))
-(EqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [0] y x))
-(EqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [0] y x))
-(EqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [0] y x))
-(EqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [0] y x))
-(EqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [0] y x))
-(EqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [0] y x))
-(EqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [0] y x))
-(EqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [0] y x))
-(EqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [0] y x))
-(EqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [0] y x)) -(EqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [0] y x)) -(EqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [0] y x)) -(EqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [0] y x)) -(GreaterInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [6] y x)) -(GreaterUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [6] y x)) -(GreaterUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [6] y x)) -(GreaterInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [6] y x)) -(GreaterUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [6] y x)) -(GreaterUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [6] y x)) -(GreaterUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [6] y x)) -(GreaterUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [6] y x)) -(GreaterUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [6] y x)) -(GreaterFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [6] y x)) -(GreaterFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [6] y x)) -(GreaterUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [6] y x)) -(GreaterUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [6] y x)) -(GreaterUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [6] y x)) -(GreaterUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [6] y x)) -(GreaterUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [6] y x)) -(GreaterEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [5] y x)) -(GreaterEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [5] y x)) -(GreaterEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [5] y x)) -(GreaterEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [5] y x)) -(GreaterEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [5] y x)) -(GreaterEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [5] y x)) -(GreaterEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [5] y x)) -(GreaterEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [5] y x)) -(GreaterEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [5] y x)) -(GreaterEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [5] y x)) -(GreaterEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [5] y x)) -(GreaterEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [5] y x)) -(GreaterEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [5] y x)) -(GreaterEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [5] y x)) -(GreaterEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [5] y x)) -(GreaterEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [5] y x)) -(GreaterEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [5] y x)) -(GreaterEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [5] y x)) -(GreaterEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [5] y x)) -(GreaterEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [5] y x)) -(GreaterEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [5] y x)) -(GreaterEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [5] y x)) -(GreaterEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [5] y x)) -(GreaterEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [5] y x)) -(GreaterEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [5] y x)) -(GreaterEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [5] y x)) -(IsNanFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [3] y x)) -(IsNanFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [3] y x)) -(LessInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [1] y x)) -(LessInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [1] y x)) -(LessInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [1] y x)) -(LessUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [1] y x)) -(LessUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [1] y x)) 
-(LessInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [1] y x)) -(LessInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [1] y x)) -(LessUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [1] y x)) -(LessFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [1] y x)) -(LessUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [1] y x)) -(LessUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [1] y x)) -(LessUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [1] y x)) -(LessUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [1] y x)) -(LessInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [1] y x)) -(LessUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [1] y x)) -(LessFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [1] y x)) -(LessInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [1] y x)) -(LessInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [1] y x)) -(LessUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [1] y x)) -(LessInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [1] y x)) -(LessUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [1] y x)) -(LessInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [1] y x)) -(LessInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [1] y x)) -(LessInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [1] y x)) -(LessUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [1] y x)) -(LessUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [1] y x)) -(LessEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [2] y x)) -(LessEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [2] y x)) -(LessEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [2] y x)) -(LessEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [2] y x)) -(LessEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [2] y x)) -(LessEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [2] y x)) -(LessEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [2] y x)) -(LessEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [2] y x)) -(LessEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [2] y x)) -(LessEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [2] y x)) -(LessEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [2] y x)) -(LessEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [2] y x)) -(LessEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [2] y x)) -(LessEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [2] y x)) -(LessEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [2] y x)) -(LessEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [2] y x)) -(LessEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [2] y x)) -(LessEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [2] y x)) -(LessEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [2] y x)) -(LessEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [2] y x)) -(LessEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [2] y x)) -(LessEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [2] y x)) -(LessEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [2] y x)) -(LessEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [2] y x)) -(LessEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [2] y x)) -(LessEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [2] y x)) -(NotEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [4] y x)) -(NotEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [4] y x)) -(NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] y x)) -(NotEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [4] y x)) -(NotEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [4] y x)) -(NotEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [4] y x)) -(NotEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [4] y x)) -(NotEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [4] y x)) 
-(NotEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [4] y x)) -(NotEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [4] y x)) -(NotEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [4] y x)) -(NotEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [4] y x)) -(NotEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [4] y x)) -(NotEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [4] y x)) -(NotEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [4] y x)) -(NotEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [4] y x)) -(NotEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [4] y x)) -(NotEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [4] y x)) -(NotEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [4] y x)) -(NotEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [4] y x)) -(NotEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [4] y x)) -(NotEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [4] y x)) -(NotEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [4] y x)) -(NotEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [4] y x)) -(NotEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [4] y x)) -(NotEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [4] y x)) -(MaskedEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPEQWMasked256 y x (VPMOVVec16x16ToM mask))) -(MaskedGreaterInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPGTWMasked256 y x (VPMOVVec16x16ToM mask))) -(MaskedEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPEQWMasked512 y x (VPMOVVec16x32ToM mask))) -(MaskedGreaterInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPGTWMasked512 y x (VPMOVVec16x32ToM mask))) -(MaskedEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPEQWMasked128 y x (VPMOVVec16x8ToM mask))) -(MaskedGreaterInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPGTWMasked128 y x (VPMOVVec16x8ToM mask))) -(MaskedEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPEQQMasked128 y x (VPMOVVec64x2ToM mask))) -(MaskedGreaterInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPGTQMasked128 y x (VPMOVVec64x2ToM mask))) -(MaskedEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPEQQMasked256 y x (VPMOVVec64x4ToM mask))) -(MaskedGreaterInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPGTQMasked256 y x (VPMOVVec64x4ToM mask))) -(MaskedEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPEQQMasked512 y x (VPMOVVec64x8ToM mask))) -(MaskedGreaterInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPGTQMasked512 y x (VPMOVVec64x8ToM mask))) -(MaskedEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [0] y x (VPMOVVec8x64ToM mask))) -(MaskedEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] y x (VPMOVVec64x4ToM mask))) -(MaskedEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [0] y x (VPMOVVec64x8ToM mask))) -(MaskedEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] y x (VPMOVVec32x8ToM mask))) -(MaskedEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] y x (VPMOVVec32x4ToM mask))) -(MaskedEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [0] y x (VPMOVVec32x8ToM mask))) -(MaskedEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] y x (VPMOVVec64x2ToM mask))) -(MaskedEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [0] y x (VPMOVVec64x2ToM mask))) -(MaskedEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [0] y x (VPMOVVec32x16ToM mask))) -(MaskedEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] y x (VPMOVVec8x16ToM mask))) -(MaskedEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [0] y x (VPMOVVec8x16ToM mask))) -(MaskedEqualUint8x32 x y mask) 
=> (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] y x (VPMOVVec8x32ToM mask))) -(MaskedEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] y x (VPMOVVec64x8ToM mask))) -(MaskedEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] y x (VPMOVVec16x32ToM mask))) -(MaskedEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] y x (VPMOVVec32x16ToM mask))) -(MaskedEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [0] y x (VPMOVVec32x8ToM mask))) -(MaskedEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [0] y x (VPMOVVec8x32ToM mask))) -(MaskedEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] y x (VPMOVVec16x8ToM mask))) -(MaskedEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] y x (VPMOVVec16x16ToM mask))) -(MaskedEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [0] y x (VPMOVVec64x4ToM mask))) -(MaskedEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [0] y x (VPMOVVec32x16ToM mask))) -(MaskedEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [0] y x (VPMOVVec32x4ToM mask))) -(MaskedEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [0] y x (VPMOVVec32x4ToM mask))) -(MaskedEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] y x (VPMOVVec8x64ToM mask))) -(MaskedGreaterFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [6] y x (VPMOVVec64x2ToM mask))) -(MaskedGreaterUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] y x (VPMOVVec16x16ToM mask))) -(MaskedGreaterUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] y x (VPMOVVec32x16ToM mask))) -(MaskedGreaterUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] y x (VPMOVVec16x32ToM mask))) -(MaskedGreaterUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] y x (VPMOVVec8x16ToM mask))) -(MaskedGreaterFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [6] y x (VPMOVVec64x4ToM mask))) -(MaskedGreaterUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] y x (VPMOVVec32x8ToM mask))) -(MaskedGreaterFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [6] y x (VPMOVVec32x16ToM mask))) -(MaskedGreaterInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [6] y x (VPMOVVec8x16ToM mask))) -(MaskedGreaterInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [6] y x (VPMOVVec32x4ToM mask))) -(MaskedGreaterInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [6] y x (VPMOVVec32x16ToM mask))) -(MaskedGreaterUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] y x (VPMOVVec16x8ToM mask))) -(MaskedGreaterFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [6] y x (VPMOVVec32x4ToM mask))) -(MaskedGreaterUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] y x (VPMOVVec64x2ToM mask))) -(MaskedGreaterUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] y x (VPMOVVec8x64ToM mask))) -(MaskedGreaterUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] y x (VPMOVVec64x8ToM mask))) -(MaskedGreaterFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [6] y x (VPMOVVec32x8ToM mask))) -(MaskedGreaterInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [6] y x (VPMOVVec32x8ToM mask))) -(MaskedGreaterUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] y x (VPMOVVec32x4ToM mask))) -(MaskedGreaterFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [6] y x (VPMOVVec64x8ToM mask))) -(MaskedGreaterUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] y x (VPMOVVec8x32ToM mask))) -(MaskedGreaterInt8x64 x y 
mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [6] y x (VPMOVVec8x64ToM mask))) -(MaskedGreaterUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] y x (VPMOVVec64x4ToM mask))) -(MaskedGreaterInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [6] y x (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [5] y x (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [5] y x (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [5] y x (VPMOVVec16x16ToM mask))) -(MaskedGreaterEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [5] y x (VPMOVVec8x16ToM mask))) -(MaskedGreaterEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [5] y x (VPMOVVec16x8ToM mask))) -(MaskedGreaterEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] y x (VPMOVVec16x8ToM mask))) -(MaskedGreaterEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [5] y x (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] y x (VPMOVVec16x16ToM mask))) -(MaskedGreaterEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] y x (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] y x (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [5] y x (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] y x (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [5] y x (VPMOVVec8x64ToM mask))) -(MaskedGreaterEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [5] y x (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] y x (VPMOVVec32x8ToM mask))) -(MaskedGreaterEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] y x (VPMOVVec8x64ToM mask))) -(MaskedGreaterEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [5] y x (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] y x (VPMOVVec16x32ToM mask))) -(MaskedGreaterEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [5] y x (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] y x (VPMOVVec32x4ToM mask))) -(MaskedGreaterEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [5] y x (VPMOVVec64x4ToM mask))) -(MaskedGreaterEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] y x (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [5] y x (VPMOVVec64x8ToM mask))) -(MaskedGreaterEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [5] y x (VPMOVVec32x16ToM mask))) -(MaskedGreaterEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [5] y x (VPMOVVec8x32ToM mask))) -(MaskedGreaterEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [5] y x (VPMOVVec64x4ToM mask))) -(MaskedGreaterEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [5] y x (VPMOVVec64x2ToM mask))) -(MaskedGreaterEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] y x (VPMOVVec8x16ToM mask))) -(MaskedGreaterEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [5] y x (VPMOVVec16x32ToM mask))) -(MaskedGreaterEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] y 
x (VPMOVVec64x4ToM mask))) -(MaskedIsNanFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [3] y x (VPMOVVec32x16ToM mask))) -(MaskedIsNanFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [3] y x (VPMOVVec64x2ToM mask))) -(MaskedIsNanFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [3] y x (VPMOVVec32x8ToM mask))) -(MaskedIsNanFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [3] y x (VPMOVVec32x4ToM mask))) -(MaskedIsNanFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [3] y x (VPMOVVec64x4ToM mask))) -(MaskedIsNanFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [3] y x (VPMOVVec64x8ToM mask))) -(MaskedLessFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [1] y x (VPMOVVec64x4ToM mask))) -(MaskedLessInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [1] y x (VPMOVVec32x8ToM mask))) -(MaskedLessInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [1] y x (VPMOVVec8x64ToM mask))) -(MaskedLessUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] y x (VPMOVVec32x16ToM mask))) -(MaskedLessInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [1] y x (VPMOVVec16x16ToM mask))) -(MaskedLessUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] y x (VPMOVVec8x32ToM mask))) -(MaskedLessInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [1] y x (VPMOVVec32x4ToM mask))) -(MaskedLessInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [1] y x (VPMOVVec32x16ToM mask))) -(MaskedLessUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] y x (VPMOVVec64x8ToM mask))) -(MaskedLessFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [1] y x (VPMOVVec64x2ToM mask))) -(MaskedLessUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] y x (VPMOVVec8x16ToM mask))) -(MaskedLessUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] y x (VPMOVVec16x8ToM mask))) -(MaskedLessUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] y x (VPMOVVec16x32ToM mask))) -(MaskedLessUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] y x (VPMOVVec32x4ToM mask))) -(MaskedLessInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [1] y x (VPMOVVec8x32ToM mask))) -(MaskedLessInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [1] y x (VPMOVVec64x2ToM mask))) -(MaskedLessUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] y x (VPMOVVec64x2ToM mask))) -(MaskedLessFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [1] y x (VPMOVVec32x16ToM mask))) -(MaskedLessInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [1] y x (VPMOVVec64x4ToM mask))) -(MaskedLessUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] y x (VPMOVVec8x64ToM mask))) -(MaskedLessUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] y x (VPMOVVec16x16ToM mask))) -(MaskedLessUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] y x (VPMOVVec64x4ToM mask))) -(MaskedLessFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [1] y x (VPMOVVec64x8ToM mask))) -(MaskedLessInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [1] y x (VPMOVVec8x16ToM mask))) -(MaskedLessFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [1] y x (VPMOVVec32x8ToM mask))) -(MaskedLessFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [1] y x (VPMOVVec32x4ToM mask))) -(MaskedLessUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] y x (VPMOVVec32x8ToM mask))) -(MaskedLessInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [1] y x (VPMOVVec16x32ToM mask))) -(MaskedLessInt64x8 x y mask) => (VPMOVMToVec64x8 
(VPCMPQMasked512 [1] y x (VPMOVVec64x8ToM mask))) -(MaskedLessInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [1] y x (VPMOVVec16x8ToM mask))) -(MaskedLessEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [2] y x (VPMOVVec32x4ToM mask))) -(MaskedLessEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [2] y x (VPMOVVec16x32ToM mask))) -(MaskedLessEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [2] y x (VPMOVVec64x4ToM mask))) -(MaskedLessEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] y x (VPMOVVec32x4ToM mask))) -(MaskedLessEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [2] y x (VPMOVVec64x8ToM mask))) -(MaskedLessEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] y x (VPMOVVec32x16ToM mask))) -(MaskedLessEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] y x (VPMOVVec64x2ToM mask))) -(MaskedLessEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [2] y x (VPMOVVec32x16ToM mask))) -(MaskedLessEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [2] y x (VPMOVVec64x8ToM mask))) -(MaskedLessEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] y x (VPMOVVec16x8ToM mask))) -(MaskedLessEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [2] y x (VPMOVVec32x4ToM mask))) -(MaskedLessEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [2] y x (VPMOVVec64x4ToM mask))) -(MaskedLessEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [2] y x (VPMOVVec64x2ToM mask))) -(MaskedLessEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [2] y x (VPMOVVec16x8ToM mask))) -(MaskedLessEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [2] y x (VPMOVVec32x16ToM mask))) -(MaskedLessEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [2] y x (VPMOVVec8x32ToM mask))) -(MaskedLessEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [2] y x (VPMOVVec8x64ToM mask))) -(MaskedLessEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [2] y x (VPMOVVec32x8ToM mask))) -(MaskedLessEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [2] y x (VPMOVVec16x16ToM mask))) -(MaskedLessEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] y x (VPMOVVec16x16ToM mask))) -(MaskedLessEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] y x (VPMOVVec8x32ToM mask))) -(MaskedLessEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] y x (VPMOVVec64x4ToM mask))) -(MaskedLessEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] y x (VPMOVVec8x64ToM mask))) -(MaskedLessEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] y x (VPMOVVec32x8ToM mask))) -(MaskedLessEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [2] y x (VPMOVVec8x16ToM mask))) -(MaskedLessEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [2] y x (VPMOVVec64x2ToM mask))) -(MaskedLessEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] y x (VPMOVVec16x32ToM mask))) -(MaskedLessEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [2] y x (VPMOVVec32x8ToM mask))) -(MaskedLessEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] y x (VPMOVVec8x16ToM mask))) -(MaskedLessEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] y x (VPMOVVec64x8ToM mask))) -(MaskedNotEqualInt64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPQMasked128 [4] y x (VPMOVVec64x2ToM mask))) -(MaskedNotEqualUint32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] y x 
(VPMOVVec32x8ToM mask))) -(MaskedNotEqualUint8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] y x (VPMOVVec8x64ToM mask))) -(MaskedNotEqualUint64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] y x (VPMOVVec64x8ToM mask))) -(MaskedNotEqualUint16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] y x (VPMOVVec16x32ToM mask))) -(MaskedNotEqualUint32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] y x (VPMOVVec32x16ToM mask))) -(MaskedNotEqualFloat32x16 x y mask) => (VPMOVMToVec32x16 (VCMPPSMasked512 [4] y x (VPMOVVec32x16ToM mask))) -(MaskedNotEqualUint8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] y x (VPMOVVec8x32ToM mask))) -(MaskedNotEqualInt8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPBMasked128 [4] y x (VPMOVVec8x16ToM mask))) -(MaskedNotEqualUint16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] y x (VPMOVVec16x16ToM mask))) -(MaskedNotEqualInt8x32 x y mask) => (VPMOVMToVec8x32 (VPCMPBMasked256 [4] y x (VPMOVVec8x32ToM mask))) -(MaskedNotEqualInt32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPDMasked128 [4] y x (VPMOVVec32x4ToM mask))) -(MaskedNotEqualFloat64x4 x y mask) => (VPMOVMToVec64x4 (VCMPPDMasked256 [4] y x (VPMOVVec64x4ToM mask))) -(MaskedNotEqualUint16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] y x (VPMOVVec16x8ToM mask))) -(MaskedNotEqualInt8x64 x y mask) => (VPMOVMToVec8x64 (VPCMPBMasked512 [4] y x (VPMOVVec8x64ToM mask))) -(MaskedNotEqualInt16x8 x y mask) => (VPMOVMToVec16x8 (VPCMPWMasked128 [4] y x (VPMOVVec16x8ToM mask))) -(MaskedNotEqualInt16x16 x y mask) => (VPMOVMToVec16x16 (VPCMPWMasked256 [4] y x (VPMOVVec16x16ToM mask))) -(MaskedNotEqualUint64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] y x (VPMOVVec64x4ToM mask))) -(MaskedNotEqualInt16x32 x y mask) => (VPMOVMToVec16x32 (VPCMPWMasked512 [4] y x (VPMOVVec16x32ToM mask))) -(MaskedNotEqualInt32x8 x y mask) => (VPMOVMToVec32x8 (VPCMPDMasked256 [4] y x (VPMOVVec32x8ToM mask))) -(MaskedNotEqualInt32x16 x y mask) => (VPMOVMToVec32x16 (VPCMPDMasked512 [4] y x (VPMOVVec32x16ToM mask))) -(MaskedNotEqualUint32x4 x y mask) => (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] y x (VPMOVVec32x4ToM mask))) -(MaskedNotEqualUint64x2 x y mask) => (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] y x (VPMOVVec64x2ToM mask))) -(MaskedNotEqualFloat64x8 x y mask) => (VPMOVMToVec64x8 (VCMPPDMasked512 [4] y x (VPMOVVec64x8ToM mask))) -(MaskedNotEqualFloat64x2 x y mask) => (VPMOVMToVec64x2 (VCMPPDMasked128 [4] y x (VPMOVVec64x2ToM mask))) -(MaskedNotEqualFloat32x8 x y mask) => (VPMOVMToVec32x8 (VCMPPSMasked256 [4] y x (VPMOVVec32x8ToM mask))) -(MaskedNotEqualInt64x4 x y mask) => (VPMOVMToVec64x4 (VPCMPQMasked256 [4] y x (VPMOVVec64x4ToM mask))) -(MaskedNotEqualFloat32x4 x y mask) => (VPMOVMToVec32x4 (VCMPPSMasked128 [4] y x (VPMOVVec32x4ToM mask))) -(MaskedNotEqualUint8x16 x y mask) => (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] y x (VPMOVVec8x16ToM mask))) -(MaskedNotEqualInt64x8 x y mask) => (VPMOVMToVec64x8 (VPCMPQMasked512 [4] y x (VPMOVVec64x8ToM mask))) +(MaskedSaturatedAddInt16x16 x y mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedAddInt16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSaturatedAddInt16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedAddInt8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSaturatedAddInt8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSaturatedAddInt8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSaturatedAddUint16x16 x y 
mask) => (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedAddUint16x32 x y mask) => (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSaturatedAddUint16x8 x y mask) => (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedAddUint8x16 x y mask) => (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSaturatedAddUint8x32 x y mask) => (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSaturatedAddUint8x64 x y mask) => (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSaturatedSubInt16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedSubInt16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSaturatedSubInt16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedSubInt8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSaturatedSubInt8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSaturatedSubInt8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSaturatedSubUint16x16 x y mask) => (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSaturatedSubUint16x32 x y mask) => (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSaturatedSubUint16x8 x y mask) => (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSaturatedSubUint8x16 x y mask) => (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSaturatedSubUint8x32 x y mask) => (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSaturatedSubUint8x64 x y mask) => (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSqrtFloat32x16 x mask) => (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) +(MaskedSqrtFloat32x4 x mask) => (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) +(MaskedSqrtFloat32x8 x mask) => (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) +(MaskedSqrtFloat64x2 x mask) => (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) +(MaskedSqrtFloat64x4 x mask) => (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) +(MaskedSqrtFloat64x8 x mask) => (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) +(MaskedSubFloat32x16 x y mask) => (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedSubFloat32x4 x y mask) => (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedSubFloat32x8 x y mask) => (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedSubFloat64x2 x y mask) => (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedSubFloat64x4 x y mask) => (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedSubFloat64x8 x y mask) => (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedSubInt16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSubInt16x32 x y mask) => (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSubInt16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSubInt32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedSubInt32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedSubInt32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedSubInt64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedSubInt64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedSubInt64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedSubInt8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSubInt8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSubInt8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedSubUint16x16 x y mask) => (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) +(MaskedSubUint16x32 x y mask) => 
(VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) +(MaskedSubUint16x8 x y mask) => (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) +(MaskedSubUint32x16 x y mask) => (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedSubUint32x4 x y mask) => (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedSubUint32x8 x y mask) => (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedSubUint64x2 x y mask) => (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedSubUint64x4 x y mask) => (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedSubUint64x8 x y mask) => (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedSubUint8x16 x y mask) => (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) +(MaskedSubUint8x32 x y mask) => (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) +(MaskedSubUint8x64 x y mask) => (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) +(MaskedXorFloat32x16 x y mask) => (VXORPSMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedXorFloat32x4 x y mask) => (VXORPSMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedXorFloat32x8 x y mask) => (VXORPSMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedXorFloat64x2 x y mask) => (VXORPDMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedXorFloat64x4 x y mask) => (VXORPDMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedXorFloat64x8 x y mask) => (VXORPDMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedXorInt32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedXorInt32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedXorInt32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedXorInt64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedXorInt64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedXorInt64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaskedXorUint32x16 x y mask) => (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) +(MaskedXorUint32x4 x y mask) => (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) +(MaskedXorUint32x8 x y mask) => (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) +(MaskedXorUint64x2 x y mask) => (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) +(MaskedXorUint64x4 x y mask) => (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) +(MaskedXorUint64x8 x y mask) => (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) +(MaxFloat32x16 ...) => (VMAXPS512 ...) +(MaxFloat32x4 ...) => (VMAXPS128 ...) +(MaxFloat32x8 ...) => (VMAXPS256 ...) +(MaxFloat64x2 ...) => (VMAXPD128 ...) +(MaxFloat64x4 ...) => (VMAXPD256 ...) +(MaxFloat64x8 ...) => (VMAXPD512 ...) +(MaxInt16x16 ...) => (VPMAXSW256 ...) +(MaxInt16x32 ...) => (VPMAXSW512 ...) +(MaxInt16x8 ...) => (VPMAXSW128 ...) +(MaxInt32x16 ...) => (VPMAXSD512 ...) +(MaxInt32x4 ...) => (VPMAXSD128 ...) +(MaxInt32x8 ...) => (VPMAXSD256 ...) +(MaxInt64x2 ...) => (VPMAXSQ128 ...) +(MaxInt64x4 ...) => (VPMAXSQ256 ...) +(MaxInt64x8 ...) => (VPMAXSQ512 ...) +(MaxInt8x16 ...) => (VPMAXSB128 ...) +(MaxInt8x32 ...) => (VPMAXSB256 ...) +(MaxInt8x64 ...) => (VPMAXSB512 ...) +(MaxUint16x16 ...) => (VPMAXUW256 ...) +(MaxUint16x32 ...) => (VPMAXUW512 ...) +(MaxUint16x8 ...) => (VPMAXUW128 ...) +(MaxUint32x16 ...) => (VPMAXUD512 ...) +(MaxUint32x4 ...) => (VPMAXUD128 ...) +(MaxUint32x8 ...) => (VPMAXUD256 ...) +(MaxUint64x2 ...) => (VPMAXUQ128 ...) +(MaxUint64x4 ...) => (VPMAXUQ256 ...) +(MaxUint64x8 ...) => (VPMAXUQ512 ...) +(MaxUint8x16 ...) => (VPMAXUB128 ...) +(MaxUint8x32 ...) => (VPMAXUB256 ...) +(MaxUint8x64 ...) => (VPMAXUB512 ...) +(MinFloat32x16 ...) => (VMINPS512 ...) +(MinFloat32x4 ...) => (VMINPS128 ...) +(MinFloat32x8 ...) => (VMINPS256 ...) +(MinFloat64x2 ...) 
=> (VMINPD128 ...) +(MinFloat64x4 ...) => (VMINPD256 ...) +(MinFloat64x8 ...) => (VMINPD512 ...) +(MinInt16x16 ...) => (VPMINSW256 ...) +(MinInt16x32 ...) => (VPMINSW512 ...) +(MinInt16x8 ...) => (VPMINSW128 ...) +(MinInt32x16 ...) => (VPMINSD512 ...) +(MinInt32x4 ...) => (VPMINSD128 ...) +(MinInt32x8 ...) => (VPMINSD256 ...) +(MinInt64x2 ...) => (VPMINSQ128 ...) +(MinInt64x4 ...) => (VPMINSQ256 ...) +(MinInt64x8 ...) => (VPMINSQ512 ...) +(MinInt8x16 ...) => (VPMINSB128 ...) +(MinInt8x32 ...) => (VPMINSB256 ...) +(MinInt8x64 ...) => (VPMINSB512 ...) +(MinUint16x16 ...) => (VPMINUW256 ...) +(MinUint16x32 ...) => (VPMINUW512 ...) +(MinUint16x8 ...) => (VPMINUW128 ...) +(MinUint32x16 ...) => (VPMINUD512 ...) +(MinUint32x4 ...) => (VPMINUD128 ...) +(MinUint32x8 ...) => (VPMINUD256 ...) +(MinUint64x2 ...) => (VPMINUQ128 ...) +(MinUint64x4 ...) => (VPMINUQ256 ...) +(MinUint64x8 ...) => (VPMINUQ512 ...) +(MinUint8x16 ...) => (VPMINUB128 ...) +(MinUint8x32 ...) => (VPMINUB256 ...) +(MinUint8x64 ...) => (VPMINUB512 ...) +(MulFloat32x16 ...) => (VMULPS512 ...) +(MulFloat32x4 ...) => (VMULPS128 ...) +(MulFloat32x8 ...) => (VMULPS256 ...) +(MulFloat64x2 ...) => (VMULPD128 ...) +(MulFloat64x4 ...) => (VMULPD256 ...) +(MulFloat64x8 ...) => (VMULPD512 ...) +(MulByPowOf2Float32x16 ...) => (VSCALEFPS512 ...) +(MulByPowOf2Float32x4 ...) => (VSCALEFPS128 ...) +(MulByPowOf2Float32x8 ...) => (VSCALEFPS256 ...) +(MulByPowOf2Float64x2 ...) => (VSCALEFPD128 ...) +(MulByPowOf2Float64x4 ...) => (VSCALEFPD256 ...) +(MulByPowOf2Float64x8 ...) => (VSCALEFPD512 ...) +(MulEvenWidenInt32x4 ...) => (VPMULDQ128 ...) +(MulEvenWidenInt32x8 ...) => (VPMULDQ256 ...) +(MulEvenWidenInt64x2 ...) => (VPMULDQ128 ...) +(MulEvenWidenInt64x4 ...) => (VPMULDQ256 ...) +(MulEvenWidenInt64x8 ...) => (VPMULDQ512 ...) +(MulEvenWidenUint32x4 ...) => (VPMULUDQ128 ...) +(MulEvenWidenUint32x8 ...) => (VPMULUDQ256 ...) +(MulEvenWidenUint64x2 ...) => (VPMULUDQ128 ...) +(MulEvenWidenUint64x4 ...) => (VPMULUDQ256 ...) +(MulEvenWidenUint64x8 ...) => (VPMULUDQ512 ...) +(MulHighInt16x16 ...) => (VPMULHW256 ...) +(MulHighInt16x32 ...) => (VPMULHW512 ...) +(MulHighInt16x8 ...) => (VPMULHW128 ...) +(MulHighUint16x16 ...) => (VPMULHUW256 ...) +(MulHighUint16x32 ...) => (VPMULHUW512 ...) +(MulHighUint16x8 ...) => (VPMULHUW128 ...) +(MulLowInt16x16 ...) => (VPMULLW256 ...) +(MulLowInt16x32 ...) => (VPMULLW512 ...) +(MulLowInt16x8 ...) => (VPMULLW128 ...) +(MulLowInt32x16 ...) => (VPMULLD512 ...) +(MulLowInt32x4 ...) => (VPMULLD128 ...) +(MulLowInt32x8 ...) => (VPMULLD256 ...) +(MulLowInt64x2 ...) => (VPMULLQ128 ...) +(MulLowInt64x4 ...) => (VPMULLQ256 ...) +(MulLowInt64x8 ...) => (VPMULLQ512 ...) 
+(NotEqualFloat32x16 x y) => (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) +(NotEqualFloat32x4 x y) => (VCMPPS128 [4] x y) +(NotEqualFloat32x8 x y) => (VCMPPS256 [4] x y) +(NotEqualFloat64x2 x y) => (VCMPPD128 [4] x y) +(NotEqualFloat64x4 x y) => (VCMPPD256 [4] x y) +(NotEqualFloat64x8 x y) => (VPMOVMToVec64x8 (VCMPPD512 [4] x y)) +(NotEqualInt16x16 x y) => (VPMOVMToVec16x16 (VPCMPW256 [4] x y)) +(NotEqualInt16x32 x y) => (VPMOVMToVec16x32 (VPCMPW512 [4] x y)) +(NotEqualInt16x8 x y) => (VPMOVMToVec16x8 (VPCMPW128 [4] x y)) +(NotEqualInt32x16 x y) => (VPMOVMToVec32x16 (VPCMPD512 [4] x y)) +(NotEqualInt32x4 x y) => (VPMOVMToVec32x4 (VPCMPD128 [4] x y)) +(NotEqualInt32x8 x y) => (VPMOVMToVec32x8 (VPCMPD256 [4] x y)) +(NotEqualInt64x2 x y) => (VPMOVMToVec64x2 (VPCMPQ128 [4] x y)) +(NotEqualInt64x4 x y) => (VPMOVMToVec64x4 (VPCMPQ256 [4] x y)) +(NotEqualInt64x8 x y) => (VPMOVMToVec64x8 (VPCMPQ512 [4] x y)) +(NotEqualInt8x16 x y) => (VPMOVMToVec8x16 (VPCMPB128 [4] x y)) +(NotEqualInt8x32 x y) => (VPMOVMToVec8x32 (VPCMPB256 [4] x y)) +(NotEqualInt8x64 x y) => (VPMOVMToVec8x64 (VPCMPB512 [4] x y)) +(NotEqualUint16x16 x y) => (VPMOVMToVec16x16 (VPCMPUW256 [4] x y)) +(NotEqualUint16x32 x y) => (VPMOVMToVec16x32 (VPCMPUW512 [4] x y)) +(NotEqualUint16x8 x y) => (VPMOVMToVec16x8 (VPCMPUW128 [4] x y)) +(NotEqualUint32x16 x y) => (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) +(NotEqualUint32x4 x y) => (VPMOVMToVec32x4 (VPCMPUD128 [4] x y)) +(NotEqualUint32x8 x y) => (VPMOVMToVec32x8 (VPCMPUD256 [4] x y)) +(NotEqualUint64x2 x y) => (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) +(NotEqualUint64x4 x y) => (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) +(NotEqualUint64x8 x y) => (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) +(NotEqualUint8x16 x y) => (VPMOVMToVec8x16 (VPCMPUB128 [4] x y)) +(NotEqualUint8x32 x y) => (VPMOVMToVec8x32 (VPCMPUB256 [4] x y)) +(NotEqualUint8x64 x y) => (VPMOVMToVec8x64 (VPCMPUB512 [4] x y)) +(OrFloat32x16 ...) => (VORPS512 ...) +(OrFloat32x4 ...) => (VORPS128 ...) +(OrFloat32x8 ...) => (VORPS256 ...) +(OrFloat64x2 ...) => (VORPD128 ...) +(OrFloat64x4 ...) => (VORPD256 ...) +(OrFloat64x8 ...) => (VORPD512 ...) +(OrInt16x16 ...) => (VPOR256 ...) +(OrInt16x8 ...) => (VPOR128 ...) +(OrInt32x16 ...) => (VPORD512 ...) +(OrInt32x4 ...) => (VPOR128 ...) +(OrInt32x8 ...) => (VPOR256 ...) +(OrInt64x2 ...) => (VPOR128 ...) +(OrInt64x4 ...) => (VPOR256 ...) +(OrInt64x8 ...) => (VPORQ512 ...) +(OrInt8x16 ...) => (VPOR128 ...) +(OrInt8x32 ...) => (VPOR256 ...) +(OrUint16x16 ...) => (VPOR256 ...) +(OrUint16x8 ...) => (VPOR128 ...) +(OrUint32x16 ...) => (VPORD512 ...) +(OrUint32x4 ...) => (VPOR128 ...) +(OrUint32x8 ...) => (VPOR256 ...) +(OrUint64x2 ...) => (VPOR128 ...) +(OrUint64x4 ...) => (VPOR256 ...) +(OrUint64x8 ...) => (VPORQ512 ...) +(OrUint8x16 ...) => (VPOR128 ...) +(OrUint8x32 ...) => (VPOR256 ...) +(PairwiseAddFloat32x4 ...) => (VHADDPS128 ...) +(PairwiseAddFloat32x8 ...) => (VHADDPS256 ...) +(PairwiseAddFloat64x2 ...) => (VHADDPD128 ...) +(PairwiseAddFloat64x4 ...) => (VHADDPD256 ...) +(PairwiseAddInt16x16 ...) => (VPHADDW256 ...) +(PairwiseAddInt16x8 ...) => (VPHADDW128 ...) +(PairwiseAddInt32x4 ...) => (VPHADDD128 ...) +(PairwiseAddInt32x8 ...) => (VPHADDD256 ...) +(PairwiseAddUint16x16 ...) => (VPHADDW256 ...) +(PairwiseAddUint16x8 ...) => (VPHADDW128 ...) +(PairwiseAddUint32x4 ...) => (VPHADDD128 ...) +(PairwiseAddUint32x8 ...) => (VPHADDD256 ...) +(PairwiseSubFloat32x4 ...) => (VHSUBPS128 ...) +(PairwiseSubFloat32x8 ...) => (VHSUBPS256 ...) +(PairwiseSubFloat64x2 ...) => (VHSUBPD128 ...) 
+(PairwiseSubFloat64x4 ...) => (VHSUBPD256 ...) +(PairwiseSubInt16x16 ...) => (VPHSUBW256 ...) +(PairwiseSubInt16x8 ...) => (VPHSUBW128 ...) +(PairwiseSubInt32x4 ...) => (VPHSUBD128 ...) +(PairwiseSubInt32x8 ...) => (VPHSUBD256 ...) +(PairwiseSubUint16x16 ...) => (VPHSUBW256 ...) +(PairwiseSubUint16x8 ...) => (VPHSUBW128 ...) +(PairwiseSubUint32x4 ...) => (VPHSUBD128 ...) +(PairwiseSubUint32x8 ...) => (VPHSUBD256 ...) +(PopCountInt16x16 ...) => (VPOPCNTW256 ...) +(PopCountInt16x32 ...) => (VPOPCNTW512 ...) +(PopCountInt16x8 ...) => (VPOPCNTW128 ...) +(PopCountInt32x16 ...) => (VPOPCNTD512 ...) +(PopCountInt32x4 ...) => (VPOPCNTD128 ...) +(PopCountInt32x8 ...) => (VPOPCNTD256 ...) +(PopCountInt64x2 ...) => (VPOPCNTQ128 ...) +(PopCountInt64x4 ...) => (VPOPCNTQ256 ...) +(PopCountInt64x8 ...) => (VPOPCNTQ512 ...) +(PopCountInt8x16 ...) => (VPOPCNTB128 ...) +(PopCountInt8x32 ...) => (VPOPCNTB256 ...) +(PopCountInt8x64 ...) => (VPOPCNTB512 ...) +(PopCountUint16x16 ...) => (VPOPCNTW256 ...) +(PopCountUint16x32 ...) => (VPOPCNTW512 ...) +(PopCountUint16x8 ...) => (VPOPCNTW128 ...) +(PopCountUint32x16 ...) => (VPOPCNTD512 ...) +(PopCountUint32x4 ...) => (VPOPCNTD128 ...) +(PopCountUint32x8 ...) => (VPOPCNTD256 ...) +(PopCountUint64x2 ...) => (VPOPCNTQ128 ...) +(PopCountUint64x4 ...) => (VPOPCNTQ256 ...) +(PopCountUint64x8 ...) => (VPOPCNTQ512 ...) +(PopCountUint8x16 ...) => (VPOPCNTB128 ...) +(PopCountUint8x32 ...) => (VPOPCNTB256 ...) +(PopCountUint8x64 ...) => (VPOPCNTB512 ...) +(SaturatedAddInt16x16 ...) => (VPADDSW256 ...) +(SaturatedAddInt16x32 ...) => (VPADDSW512 ...) +(SaturatedAddInt16x8 ...) => (VPADDSW128 ...) +(SaturatedAddInt8x16 ...) => (VPADDSB128 ...) +(SaturatedAddInt8x32 ...) => (VPADDSB256 ...) +(SaturatedAddInt8x64 ...) => (VPADDSB512 ...) +(SaturatedAddUint16x16 ...) => (VPADDSW256 ...) +(SaturatedAddUint16x32 ...) => (VPADDSW512 ...) +(SaturatedAddUint16x8 ...) => (VPADDSW128 ...) +(SaturatedAddUint8x16 ...) => (VPADDSB128 ...) +(SaturatedAddUint8x32 ...) => (VPADDSB256 ...) +(SaturatedAddUint8x64 ...) => (VPADDSB512 ...) +(SaturatedPairwiseAddInt16x16 ...) => (VPHADDSW256 ...) +(SaturatedPairwiseAddInt16x8 ...) => (VPHADDSW128 ...) +(SaturatedPairwiseSubInt16x16 ...) => (VPHSUBSW256 ...) +(SaturatedPairwiseSubInt16x8 ...) => (VPHSUBSW128 ...) +(SaturatedSubInt16x16 ...) => (VPSUBSW256 ...) +(SaturatedSubInt16x32 ...) => (VPSUBSW512 ...) +(SaturatedSubInt16x8 ...) => (VPSUBSW128 ...) +(SaturatedSubInt8x16 ...) => (VPSUBSB128 ...) +(SaturatedSubInt8x32 ...) => (VPSUBSB256 ...) +(SaturatedSubInt8x64 ...) => (VPSUBSB512 ...) +(SaturatedSubUint16x16 ...) => (VPSUBSW256 ...) +(SaturatedSubUint16x32 ...) => (VPSUBSW512 ...) +(SaturatedSubUint16x8 ...) => (VPSUBSW128 ...) +(SaturatedSubUint8x16 ...) => (VPSUBSB128 ...) +(SaturatedSubUint8x32 ...) => (VPSUBSB256 ...) +(SaturatedSubUint8x64 ...) => (VPSUBSB512 ...) +(SignInt16x16 ...) => (VPSIGNW256 ...) +(SignInt16x8 ...) => (VPSIGNW128 ...) +(SignInt32x4 ...) => (VPSIGND128 ...) +(SignInt32x8 ...) => (VPSIGND256 ...) +(SignInt8x16 ...) => (VPSIGNB128 ...) +(SignInt8x32 ...) => (VPSIGNB256 ...) +(SqrtFloat32x16 ...) => (VSQRTPS512 ...) +(SqrtFloat32x4 ...) => (VSQRTPS128 ...) +(SqrtFloat32x8 ...) => (VSQRTPS256 ...) +(SqrtFloat64x2 ...) => (VSQRTPD128 ...) +(SqrtFloat64x4 ...) => (VSQRTPD256 ...) +(SqrtFloat64x8 ...) => (VSQRTPD512 ...) +(SubFloat32x16 ...) => (VADDPS512 ...) +(SubFloat32x4 ...) => (VADDPS128 ...) +(SubFloat32x8 ...) => (VADDPS256 ...) +(SubFloat64x2 ...) => (VADDPD128 ...) +(SubFloat64x4 ...) => (VADDPD256 ...) 
+(SubFloat64x8 ...) => (VADDPD512 ...)
+(SubInt16x16 ...) => (VPSUBW256 ...)
+(SubInt16x32 ...) => (VPSUBW512 ...)
+(SubInt16x8 ...) => (VPSUBW128 ...)
+(SubInt32x16 ...) => (VPSUBD512 ...)
+(SubInt32x4 ...) => (VPSUBD128 ...)
+(SubInt32x8 ...) => (VPSUBD256 ...)
+(SubInt64x2 ...) => (VPSUBQ128 ...)
+(SubInt64x4 ...) => (VPSUBQ256 ...)
+(SubInt64x8 ...) => (VPSUBQ512 ...)
+(SubInt8x16 ...) => (VPSUBB128 ...)
+(SubInt8x32 ...) => (VPSUBB256 ...)
+(SubInt8x64 ...) => (VPSUBB512 ...)
+(SubUint16x16 ...) => (VPSUBW256 ...)
+(SubUint16x32 ...) => (VPSUBW512 ...)
+(SubUint16x8 ...) => (VPSUBW128 ...)
+(SubUint32x16 ...) => (VPSUBD512 ...)
+(SubUint32x4 ...) => (VPSUBD128 ...)
+(SubUint32x8 ...) => (VPSUBD256 ...)
+(SubUint64x2 ...) => (VPSUBQ128 ...)
+(SubUint64x4 ...) => (VPSUBQ256 ...)
+(SubUint64x8 ...) => (VPSUBQ512 ...)
+(SubUint8x16 ...) => (VPSUBB128 ...)
+(SubUint8x32 ...) => (VPSUBB256 ...)
+(SubUint8x64 ...) => (VPSUBB512 ...)
+(XorFloat32x16 ...) => (VXORPS512 ...)
+(XorFloat32x4 ...) => (VXORPS128 ...)
+(XorFloat32x8 ...) => (VXORPS256 ...)
+(XorFloat64x2 ...) => (VXORPD128 ...)
+(XorFloat64x4 ...) => (VXORPD256 ...)
+(XorFloat64x8 ...) => (VXORPD512 ...)
+(XorInt16x16 ...) => (VPXOR256 ...)
+(XorInt16x8 ...) => (VPXOR128 ...)
+(XorInt32x16 ...) => (VPXORD512 ...)
+(XorInt32x4 ...) => (VPXOR128 ...)
+(XorInt32x8 ...) => (VPXOR256 ...)
+(XorInt64x2 ...) => (VPXOR128 ...)
+(XorInt64x4 ...) => (VPXOR256 ...)
+(XorInt64x8 ...) => (VPXORQ512 ...)
+(XorInt8x16 ...) => (VPXOR128 ...)
+(XorInt8x32 ...) => (VPXOR256 ...)
+(XorUint16x16 ...) => (VPXOR256 ...)
+(XorUint16x8 ...) => (VPXOR128 ...)
+(XorUint32x16 ...) => (VPXORD512 ...)
+(XorUint32x4 ...) => (VPXOR128 ...)
+(XorUint32x8 ...) => (VPXOR256 ...)
+(XorUint64x2 ...) => (VPXOR128 ...)
+(XorUint64x4 ...) => (VPXOR256 ...)
+(XorUint64x8 ...) => (VPXORQ512 ...)
+(XorUint8x16 ...) => (VPXOR128 ...)
+(XorUint8x32 ...) => (VPXOR256 ...)
diff --git a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go
index a27ed4afb9..b9709ca819 100644
--- a/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go
+++ b/src/cmd/compile/internal/ssa/_gen/simdAMD64ops.go
@@ -1,591 +1,607 @@
 // Code generated by x/arch/internal/simdgen using 'go run . -xedPath $XED_PATH -o godefs -goroot $GOROOT go.yaml types.yaml categories.yaml'; DO NOT EDIT.

 package main

-func simdAMD64Ops(fp1fp1, fp2fp1, fp2m1, fp1m1fp1, fp2m1fp1, fp2m1m1, fp3fp1, fp3m1fp1 regInfo) []opData {
+func simdAMD64Ops(fp11, fp21, fp2k1, fp1k1fp1, fp2k1fp1, fp2k1k1, fp31, fp3k1fp1 regInfo) []opData {
 	return []opData{
-		{name: "VADDPS512", argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: true, typ: "Vec512"},
-		{name: "VANDPS512", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec512"},
-		{name: "VANDNPS512", argLength: 2, reg: fp2fp1, asm: "VANDNPS", commutative: true, typ: "Vec512"},
-		{name: "VRCP14PS512", argLength: 1, reg: fp1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512"},
-		{name: "VRSQRT14PS512", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512"},
-		{name: "VDIVPS512", argLength: 2, reg: fp2fp1, asm: "VDIVPS", commutative: false, typ: "Vec512"},
-		{name: "VANDPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDPS", commutative: true, typ: "Vec512"},
-		{name: "VANDNPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDNPS", commutative: true, typ: "Vec512"},
-		{name: "VRCP14PSMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512"},
-		{name: "VRSQRT14PSMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512"},
-		{name: "VDIVPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VDIVPS", commutative: false, typ: "Vec512"},
-		{name: "VMAXPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMAXPS", commutative: true, typ: "Vec512"},
-		{name: "VMINPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMINPS", commutative: true, typ: "Vec512"},
-		{name: "VMULPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMULPS", commutative: true, typ: "Vec512"},
-		{name: "VSCALEFPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec512"},
-		{name: "VORPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VORPS", commutative: true, typ: "Vec512"},
-		{name: "VSQRTPSMasked512", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec512"},
-		{name: "VADDPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VADDPS", commutative: false, typ: "Vec512"},
-		{name: "VXORPSMasked512", argLength: 3, reg: fp2m1fp1, asm: "VXORPS", commutative: true, typ: "Vec512"},
-		{name: "VMAXPS512", argLength: 2, reg: fp2fp1, asm: "VMAXPS", commutative: true, typ: "Vec512"},
-		{name: "VMINPS512", argLength: 2, reg: fp2fp1, asm: "VMINPS", commutative: true, typ: "Vec512"},
-		{name: "VMULPS512", argLength: 2, reg: fp2fp1, asm: "VMULPS", commutative: true, typ: "Vec512"},
-		{name: "VSCALEFPS512", argLength: 2, reg: fp2fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec512"},
-		{name: "VORPS512", argLength: 2, reg: fp2fp1, asm: "VORPS", commutative: true, typ: "Vec512"},
-		{name: "VSQRTPS512", argLength: 1, reg: fp1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec512"},
-		{name: "VXORPS512", argLength: 2, reg: fp2fp1, asm: "VXORPS", commutative: true, typ: "Vec512"},
-		{name: "VANDPS128", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec128"},
-		{name: "VANDNPS128", argLength: 2, reg: fp2fp1, asm: "VANDNPS", commutative: true, typ: "Vec128"},
-		{name: "VRCP14PS128", argLength: 1, reg: fp1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128"},
-		{name: "VRSQRTPS128", argLength: 1, reg: fp1fp1, asm: "VRSQRTPS", commutative: false, typ: "Vec128"},
-		{name: "VDIVPS128", argLength: 2, reg: fp2fp1, asm: "VDIVPS", commutative: false, typ: "Vec128"},
-		{name: "VADDPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VADDPS", commutative: true, typ: "Vec128"},
-		{name: "VANDPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDPS", commutative: true, typ: "Vec128"},
-		{name: "VANDNPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDNPS", commutative: true, typ: "Vec128"},
-		{name: "VRCP14PSMasked128", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128"},
-		{name: "VRSQRT14PSMasked128", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec128"},
-		{name: "VDIVPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VDIVPS", commutative: false, typ: "Vec128"},
-		{name: "VMAXPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMAXPS", commutative: true, typ: "Vec128"},
-		{name: "VMINPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMINPS", commutative: true, typ: "Vec128"},
-		{name: "VMULPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMULPS", commutative: true, typ: "Vec128"},
-		{name: "VSCALEFPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec128"},
-		{name: "VORPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VORPS", commutative: true, typ: "Vec128"},
-		{name: "VSQRTPSMasked128", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec128"},
-		{name: "VXORPSMasked128", argLength: 3, reg: fp2m1fp1, asm: "VXORPS", commutative: true, typ: "Vec128"},
-		{name: "VMAXPS128", argLength: 2, reg: fp2fp1, asm: "VMAXPS", commutative: true, typ: "Vec128"},
-		{name: "VMINPS128", argLength: 2, reg: fp2fp1, asm: "VMINPS", commutative: true, typ: "Vec128"},
-		{name: "VMULPS128", argLength: 2, reg: fp2fp1, asm: "VMULPS", commutative: true, typ: "Vec128"},
-		{name: "VSCALEFPS128", argLength: 2, reg: fp2fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec128"},
-		{name: "VORPS128", argLength: 2, reg: fp2fp1, asm: "VORPS", commutative: true, typ: "Vec128"},
-		{name: "VHADDPS128", argLength: 2, reg: fp2fp1, asm: "VHADDPS", commutative: false, typ: "Vec128"},
-		{name: "VHSUBPS128", argLength: 2, reg: fp2fp1, asm: "VHSUBPS", commutative: false, typ: "Vec128"},
-		{name: "VSQRTPS128", argLength: 1, reg: fp1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec128"},
-		{name: "VADDPS128", argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: false, typ: "Vec128"},
-		{name: "VXORPS128", argLength: 2, reg: fp2fp1, asm: "VXORPS", commutative: true, typ: "Vec128"},
-		{name: "VADDPS256", argLength: 2, reg: fp2fp1, asm: "VADDPS", commutative: true, typ: "Vec256"},
-		{name: "VANDPS256", argLength: 2, reg: fp2fp1, asm: "VANDPS", commutative: true, typ: "Vec256"},
-		{name: "VANDNPS256", argLength: 2, reg: fp2fp1, asm: "VANDNPS", commutative: true, typ: "Vec256"},
-		{name: "VRCP14PS256", argLength: 1, reg: fp1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256"},
-		{name: "VRSQRTPS256", argLength: 1, reg: fp1fp1, asm: "VRSQRTPS", commutative: false, typ: "Vec256"},
-		{name: "VDIVPS256", argLength: 2, reg: fp2fp1, asm: "VDIVPS", commutative: false, typ: "Vec256"},
-		{name: "VANDPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDPS", commutative: true, typ: "Vec256"},
-		{name: "VANDNPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDNPS", commutative: true, typ: "Vec256"},
-		{name: "VRCP14PSMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256"},
-		{name: "VRSQRT14PSMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec256"},
-		{name: "VDIVPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VDIVPS", commutative: false, typ: "Vec256"},
-		{name: "VMAXPSMasked256", argLength: 3, reg: 
fp2m1fp1, asm: "VMAXPS", commutative: true, typ: "Vec256"}, - {name: "VMINPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMINPS", commutative: true, typ: "Vec256"}, - {name: "VMULPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMULPS", commutative: true, typ: "Vec256"}, - {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec256"}, - {name: "VORPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VORPS", commutative: true, typ: "Vec256"}, - {name: "VSQRTPSMasked256", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec256"}, - {name: "VADDPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VADDPS", commutative: false, typ: "Vec256"}, - {name: "VXORPSMasked256", argLength: 3, reg: fp2m1fp1, asm: "VXORPS", commutative: true, typ: "Vec256"}, - {name: "VMAXPS256", argLength: 2, reg: fp2fp1, asm: "VMAXPS", commutative: true, typ: "Vec256"}, - {name: "VMINPS256", argLength: 2, reg: fp2fp1, asm: "VMINPS", commutative: true, typ: "Vec256"}, - {name: "VMULPS256", argLength: 2, reg: fp2fp1, asm: "VMULPS", commutative: true, typ: "Vec256"}, - {name: "VSCALEFPS256", argLength: 2, reg: fp2fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec256"}, - {name: "VORPS256", argLength: 2, reg: fp2fp1, asm: "VORPS", commutative: true, typ: "Vec256"}, - {name: "VHADDPS256", argLength: 2, reg: fp2fp1, asm: "VHADDPS", commutative: false, typ: "Vec256"}, - {name: "VHSUBPS256", argLength: 2, reg: fp2fp1, asm: "VHSUBPS", commutative: false, typ: "Vec256"}, - {name: "VSQRTPS256", argLength: 1, reg: fp1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec256"}, - {name: "VXORPS256", argLength: 2, reg: fp2fp1, asm: "VXORPS", commutative: true, typ: "Vec256"}, - {name: "VADDPD128", argLength: 2, reg: fp2fp1, asm: "VADDPD", commutative: true, typ: "Vec128"}, - {name: "VANDPD128", argLength: 2, reg: fp2fp1, asm: "VANDPD", commutative: true, typ: "Vec128"}, - {name: "VANDNPD128", argLength: 2, reg: fp2fp1, asm: "VANDNPD", commutative: true, typ: "Vec128"}, - {name: "VRCP14PD128", argLength: 1, reg: fp1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128"}, - {name: "VRSQRT14PD128", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128"}, - {name: "VDIVPD128", argLength: 2, reg: fp2fp1, asm: "VDIVPD", commutative: false, typ: "Vec128"}, - {name: "VADDPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VADDPD", commutative: true, typ: "Vec128"}, - {name: "VANDPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDPD", commutative: true, typ: "Vec128"}, - {name: "VANDNPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VANDNPD", commutative: true, typ: "Vec128"}, - {name: "VRCP14PDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128"}, - {name: "VRSQRT14PDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128"}, - {name: "VDIVPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VDIVPD", commutative: false, typ: "Vec128"}, - {name: "VMAXPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMAXPD", commutative: true, typ: "Vec128"}, - {name: "VMINPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMINPD", commutative: true, typ: "Vec128"}, - {name: "VMULPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VMULPD", commutative: true, typ: "Vec128"}, - {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec128"}, - {name: "VORPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VORPD", commutative: true, typ: "Vec128"}, - {name: 
"VSQRTPDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec128"}, - {name: "VXORPDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VXORPD", commutative: true, typ: "Vec128"}, - {name: "VMAXPD128", argLength: 2, reg: fp2fp1, asm: "VMAXPD", commutative: true, typ: "Vec128"}, - {name: "VMINPD128", argLength: 2, reg: fp2fp1, asm: "VMINPD", commutative: true, typ: "Vec128"}, - {name: "VMULPD128", argLength: 2, reg: fp2fp1, asm: "VMULPD", commutative: true, typ: "Vec128"}, - {name: "VSCALEFPD128", argLength: 2, reg: fp2fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec128"}, - {name: "VORPD128", argLength: 2, reg: fp2fp1, asm: "VORPD", commutative: true, typ: "Vec128"}, - {name: "VHADDPD128", argLength: 2, reg: fp2fp1, asm: "VHADDPD", commutative: false, typ: "Vec128"}, - {name: "VHSUBPD128", argLength: 2, reg: fp2fp1, asm: "VHSUBPD", commutative: false, typ: "Vec128"}, - {name: "VSQRTPD128", argLength: 1, reg: fp1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec128"}, - {name: "VXORPD128", argLength: 2, reg: fp2fp1, asm: "VXORPD", commutative: true, typ: "Vec128"}, - {name: "VADDPD256", argLength: 2, reg: fp2fp1, asm: "VADDPD", commutative: true, typ: "Vec256"}, - {name: "VANDPD256", argLength: 2, reg: fp2fp1, asm: "VANDPD", commutative: true, typ: "Vec256"}, - {name: "VANDNPD256", argLength: 2, reg: fp2fp1, asm: "VANDNPD", commutative: true, typ: "Vec256"}, - {name: "VRCP14PD256", argLength: 1, reg: fp1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256"}, - {name: "VRSQRT14PD256", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256"}, - {name: "VDIVPD256", argLength: 2, reg: fp2fp1, asm: "VDIVPD", commutative: false, typ: "Vec256"}, - {name: "VANDPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDPD", commutative: true, typ: "Vec256"}, - {name: "VANDNPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VANDNPD", commutative: true, typ: "Vec256"}, - {name: "VRCP14PDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256"}, - {name: "VRSQRT14PDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256"}, - {name: "VDIVPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VDIVPD", commutative: false, typ: "Vec256"}, - {name: "VMAXPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMAXPD", commutative: true, typ: "Vec256"}, - {name: "VMINPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMINPD", commutative: true, typ: "Vec256"}, - {name: "VMULPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VMULPD", commutative: true, typ: "Vec256"}, - {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec256"}, - {name: "VORPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VORPD", commutative: true, typ: "Vec256"}, - {name: "VSQRTPDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec256"}, - {name: "VADDPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VADDPD", commutative: false, typ: "Vec256"}, - {name: "VXORPDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VXORPD", commutative: true, typ: "Vec256"}, - {name: "VMAXPD256", argLength: 2, reg: fp2fp1, asm: "VMAXPD", commutative: true, typ: "Vec256"}, - {name: "VMINPD256", argLength: 2, reg: fp2fp1, asm: "VMINPD", commutative: true, typ: "Vec256"}, - {name: "VMULPD256", argLength: 2, reg: fp2fp1, asm: "VMULPD", commutative: true, typ: "Vec256"}, - {name: "VSCALEFPD256", argLength: 2, reg: fp2fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec256"}, - {name: 
"VORPD256", argLength: 2, reg: fp2fp1, asm: "VORPD", commutative: true, typ: "Vec256"}, - {name: "VHADDPD256", argLength: 2, reg: fp2fp1, asm: "VHADDPD", commutative: false, typ: "Vec256"}, - {name: "VHSUBPD256", argLength: 2, reg: fp2fp1, asm: "VHSUBPD", commutative: false, typ: "Vec256"}, - {name: "VSQRTPD256", argLength: 1, reg: fp1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec256"}, - {name: "VXORPD256", argLength: 2, reg: fp2fp1, asm: "VXORPD", commutative: true, typ: "Vec256"}, - {name: "VANDPD512", argLength: 2, reg: fp2fp1, asm: "VANDPD", commutative: true, typ: "Vec512"}, - {name: "VANDNPD512", argLength: 2, reg: fp2fp1, asm: "VANDNPD", commutative: true, typ: "Vec512"}, - {name: "VRCP14PD512", argLength: 1, reg: fp1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512"}, - {name: "VRSQRT14PD512", argLength: 1, reg: fp1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512"}, - {name: "VDIVPD512", argLength: 2, reg: fp2fp1, asm: "VDIVPD", commutative: false, typ: "Vec512"}, - {name: "VANDPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDPD", commutative: true, typ: "Vec512"}, - {name: "VANDNPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VANDNPD", commutative: true, typ: "Vec512"}, - {name: "VRCP14PDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512"}, - {name: "VRSQRT14PDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512"}, - {name: "VDIVPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VDIVPD", commutative: false, typ: "Vec512"}, - {name: "VMAXPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMAXPD", commutative: true, typ: "Vec512"}, - {name: "VMINPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMINPD", commutative: true, typ: "Vec512"}, - {name: "VMULPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VMULPD", commutative: true, typ: "Vec512"}, - {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec512"}, - {name: "VORPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VORPD", commutative: true, typ: "Vec512"}, - {name: "VSQRTPDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec512"}, - {name: "VADDPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VADDPD", commutative: false, typ: "Vec512"}, - {name: "VXORPDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VXORPD", commutative: true, typ: "Vec512"}, - {name: "VMAXPD512", argLength: 2, reg: fp2fp1, asm: "VMAXPD", commutative: true, typ: "Vec512"}, - {name: "VMINPD512", argLength: 2, reg: fp2fp1, asm: "VMINPD", commutative: true, typ: "Vec512"}, - {name: "VMULPD512", argLength: 2, reg: fp2fp1, asm: "VMULPD", commutative: true, typ: "Vec512"}, - {name: "VSCALEFPD512", argLength: 2, reg: fp2fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec512"}, - {name: "VORPD512", argLength: 2, reg: fp2fp1, asm: "VORPD", commutative: true, typ: "Vec512"}, - {name: "VSQRTPD512", argLength: 1, reg: fp1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec512"}, - {name: "VADDPD512", argLength: 2, reg: fp2fp1, asm: "VADDPD", commutative: false, typ: "Vec512"}, - {name: "VXORPD512", argLength: 2, reg: fp2fp1, asm: "VXORPD", commutative: true, typ: "Vec512"}, - {name: "VPABSW256", argLength: 1, reg: fp1fp1, asm: "VPABSW", commutative: false, typ: "Vec256"}, - {name: "VPADDW256", argLength: 2, reg: fp2fp1, asm: "VPADDW", commutative: true, typ: "Vec256"}, - {name: "VPCMPEQW256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQW", commutative: true, typ: "Vec256"}, - {name: "VPCMPGTW256", 
argLength: 2, reg: fp2fp1, asm: "VPCMPGTW", commutative: false, typ: "Vec256"}, - {name: "VPABSWMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSW", commutative: false, typ: "Vec256"}, - {name: "VPADDWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDW", commutative: true, typ: "Vec256"}, - {name: "VPCMPEQWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, - {name: "VPCMPGTWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, - {name: "VPMAXSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec256"}, - {name: "VPMINSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSW", commutative: true, typ: "Vec256"}, - {name: "VPMULHWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULHW", commutative: true, typ: "Vec256"}, - {name: "VPMULLWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULLW", commutative: true, typ: "Vec256"}, - {name: "VPADDSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDSW", commutative: true, typ: "Vec256"}, - {name: "VPSUBSWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec256"}, - {name: "VPSUBWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBW", commutative: false, typ: "Vec256"}, - {name: "VPMAXSW256", argLength: 2, reg: fp2fp1, asm: "VPMAXSW", commutative: true, typ: "Vec256"}, - {name: "VPMINSW256", argLength: 2, reg: fp2fp1, asm: "VPMINSW", commutative: true, typ: "Vec256"}, - {name: "VPMULHW256", argLength: 2, reg: fp2fp1, asm: "VPMULHW", commutative: true, typ: "Vec256"}, - {name: "VPMULLW256", argLength: 2, reg: fp2fp1, asm: "VPMULLW", commutative: true, typ: "Vec256"}, - {name: "VPHSUBW256", argLength: 2, reg: fp2fp1, asm: "VPHSUBW", commutative: false, typ: "Vec256"}, - {name: "VPHADDSW256", argLength: 2, reg: fp2fp1, asm: "VPHADDSW", commutative: false, typ: "Vec256"}, - {name: "VPHSUBSW256", argLength: 2, reg: fp2fp1, asm: "VPHSUBSW", commutative: false, typ: "Vec256"}, - {name: "VPSUBSW256", argLength: 2, reg: fp2fp1, asm: "VPSUBSW", commutative: false, typ: "Vec256"}, - {name: "VPSIGNW256", argLength: 2, reg: fp2fp1, asm: "VPSIGNW", commutative: false, typ: "Vec256"}, - {name: "VPSUBW256", argLength: 2, reg: fp2fp1, asm: "VPSUBW", commutative: false, typ: "Vec256"}, - {name: "VPABSW512", argLength: 1, reg: fp1fp1, asm: "VPABSW", commutative: false, typ: "Vec512"}, - {name: "VPADDW512", argLength: 2, reg: fp2fp1, asm: "VPADDW", commutative: true, typ: "Vec512"}, - {name: "VPCMPEQW512", argLength: 2, reg: fp2m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, - {name: "VPCMPGTW512", argLength: 2, reg: fp2m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, - {name: "VPABSWMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSW", commutative: false, typ: "Vec512"}, - {name: "VPCMPEQWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, - {name: "VPCMPGTWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, - {name: "VPMAXSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec512"}, - {name: "VPMINSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSW", commutative: true, typ: "Vec512"}, - {name: "VPMULHWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULHW", commutative: true, typ: "Vec512"}, - {name: "VPMULLWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULLW", commutative: true, typ: "Vec512"}, - {name: "VPMAXSW512", argLength: 2, reg: fp2fp1, asm: "VPMAXSW", commutative: true, typ: "Vec512"}, 
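An aside on the ops declared with typ: "Mask" in this hunk (VPCMPEQW512, VPCMPGTW512, and the Masked compare variants): under AVX-512 these compares write one bit per lane into a k register rather than an all-ones/all-zeros vector lane, which is why their register shape is fp2m1 (now fp2k1) instead of fp2fp1 (now fp21). A scalar sketch of that semantic, with toy types that are not the compiler's:

    package main

    import "fmt"

    // cmpEqW16x32 models what a mask-typed compare such as VPCMPEQW512
    // computes: a 512-bit vector holds 32 int16 lanes, and the result is
    // a k-mask with bit i set iff lane i compares equal.
    func cmpEqW16x32(x, y [32]int16) uint32 {
        var mask uint32
        for i := range x {
            if x[i] == y[i] {
                mask |= 1 << uint(i)
            }
        }
        return mask
    }

    func main() {
        var x, y [32]int16
        x[5], y[5] = 1, 2 // every other lane stays 0 == 0
        fmt.Printf("%#x\n", cmpEqW16x32(x, y)) // 0xffffffdf: all lanes equal except lane 5
    }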
- {name: "VPMINSW512", argLength: 2, reg: fp2fp1, asm: "VPMINSW", commutative: true, typ: "Vec512"}, - {name: "VPMULHW512", argLength: 2, reg: fp2fp1, asm: "VPMULHW", commutative: true, typ: "Vec512"}, - {name: "VPMULLW512", argLength: 2, reg: fp2fp1, asm: "VPMULLW", commutative: true, typ: "Vec512"}, - {name: "VPSUBSW512", argLength: 2, reg: fp2fp1, asm: "VPSUBSW", commutative: false, typ: "Vec512"}, - {name: "VPABSW128", argLength: 1, reg: fp1fp1, asm: "VPABSW", commutative: false, typ: "Vec128"}, - {name: "VPADDW128", argLength: 2, reg: fp2fp1, asm: "VPADDW", commutative: true, typ: "Vec128"}, - {name: "VPCMPEQW128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQW", commutative: true, typ: "Vec128"}, - {name: "VPCMPGTW128", argLength: 2, reg: fp2fp1, asm: "VPCMPGTW", commutative: false, typ: "Vec128"}, - {name: "VPABSWMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSW", commutative: false, typ: "Vec128"}, - {name: "VPCMPEQWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQW", commutative: true, typ: "Mask"}, - {name: "VPCMPGTWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTW", commutative: false, typ: "Mask"}, - {name: "VPMAXSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec128"}, - {name: "VPMINSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSW", commutative: true, typ: "Vec128"}, - {name: "VPMULHWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULHW", commutative: true, typ: "Vec128"}, - {name: "VPMULLWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULLW", commutative: true, typ: "Vec128"}, - {name: "VPOPCNTWMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec128"}, - {name: "VPSUBSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec128"}, - {name: "VPMAXSW128", argLength: 2, reg: fp2fp1, asm: "VPMAXSW", commutative: true, typ: "Vec128"}, - {name: "VPMINSW128", argLength: 2, reg: fp2fp1, asm: "VPMINSW", commutative: true, typ: "Vec128"}, - {name: "VPMULHW128", argLength: 2, reg: fp2fp1, asm: "VPMULHW", commutative: true, typ: "Vec128"}, - {name: "VPMULLW128", argLength: 2, reg: fp2fp1, asm: "VPMULLW", commutative: true, typ: "Vec128"}, - {name: "VPHSUBW128", argLength: 2, reg: fp2fp1, asm: "VPHSUBW", commutative: false, typ: "Vec128"}, - {name: "VPHADDSW128", argLength: 2, reg: fp2fp1, asm: "VPHADDSW", commutative: false, typ: "Vec128"}, - {name: "VPHSUBSW128", argLength: 2, reg: fp2fp1, asm: "VPHSUBSW", commutative: false, typ: "Vec128"}, - {name: "VPSIGNW128", argLength: 2, reg: fp2fp1, asm: "VPSIGNW", commutative: false, typ: "Vec128"}, - {name: "VPABSD512", argLength: 1, reg: fp1fp1, asm: "VPABSD", commutative: false, typ: "Vec512"}, - {name: "VPANDD512", argLength: 2, reg: fp2fp1, asm: "VPANDD", commutative: true, typ: "Vec512"}, - {name: "VPABSDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSD", commutative: false, typ: "Vec512"}, - {name: "VPMAXSDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512"}, - {name: "VPMINSDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSD", commutative: true, typ: "Vec512"}, - {name: "VPMULLDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULLD", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTDMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec512"}, - {name: "VPSUBDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBD", commutative: false, typ: "Vec512"}, - {name: "VPXORDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPXORD", commutative: true, 
typ: "Vec512"}, - {name: "VPMAXSD512", argLength: 2, reg: fp2fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512"}, - {name: "VPMINSD512", argLength: 2, reg: fp2fp1, asm: "VPMINSD", commutative: true, typ: "Vec512"}, - {name: "VPMULLD512", argLength: 2, reg: fp2fp1, asm: "VPMULLD", commutative: true, typ: "Vec512"}, - {name: "VPORD512", argLength: 2, reg: fp2fp1, asm: "VPORD", commutative: true, typ: "Vec512"}, - {name: "VPXORD512", argLength: 2, reg: fp2fp1, asm: "VPXORD", commutative: true, typ: "Vec512"}, - {name: "VPABSD128", argLength: 1, reg: fp1fp1, asm: "VPABSD", commutative: false, typ: "Vec128"}, - {name: "VPCMPEQD128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQD", commutative: true, typ: "Vec128"}, - {name: "VPCMPGTD128", argLength: 2, reg: fp2fp1, asm: "VPCMPGTD", commutative: false, typ: "Vec128"}, - {name: "VPABSDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSD", commutative: false, typ: "Vec128"}, - {name: "VPANDDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDD", commutative: true, typ: "Vec128"}, - {name: "VPMAXSDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128"}, - {name: "VPMINSDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSD", commutative: true, typ: "Vec128"}, - {name: "VPMULLDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULLD", commutative: true, typ: "Vec128"}, - {name: "VPORDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPORD", commutative: true, typ: "Vec128"}, - {name: "VPOPCNTDMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec128"}, - {name: "VPSUBDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBD", commutative: false, typ: "Vec128"}, - {name: "VPXORDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPXORD", commutative: true, typ: "Vec128"}, - {name: "VPMAXSD128", argLength: 2, reg: fp2fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128"}, - {name: "VPMINSD128", argLength: 2, reg: fp2fp1, asm: "VPMINSD", commutative: true, typ: "Vec128"}, - {name: "VPMULLD128", argLength: 2, reg: fp2fp1, asm: "VPMULLD", commutative: true, typ: "Vec128"}, - {name: "VPHSUBD128", argLength: 2, reg: fp2fp1, asm: "VPHSUBD", commutative: false, typ: "Vec128"}, - {name: "VPSIGND128", argLength: 2, reg: fp2fp1, asm: "VPSIGND", commutative: false, typ: "Vec128"}, - {name: "VPSUBD128", argLength: 2, reg: fp2fp1, asm: "VPSUBD", commutative: false, typ: "Vec128"}, - {name: "VPABSD256", argLength: 1, reg: fp1fp1, asm: "VPABSD", commutative: false, typ: "Vec256"}, - {name: "VPAND256", argLength: 2, reg: fp2fp1, asm: "VPAND", commutative: true, typ: "Vec256"}, - {name: "VPCMPEQD256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQD", commutative: true, typ: "Vec256"}, - {name: "VPCMPGTD256", argLength: 2, reg: fp2fp1, asm: "VPCMPGTD", commutative: false, typ: "Vec256"}, - {name: "VPABSDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSD", commutative: false, typ: "Vec256"}, - {name: "VPMAXSDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec256"}, - {name: "VPMINSDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSD", commutative: true, typ: "Vec256"}, - {name: "VPMULLDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULLD", commutative: true, typ: "Vec256"}, - {name: "VPORDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPORD", commutative: true, typ: "Vec256"}, - {name: "VPSUBDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBD", commutative: false, typ: "Vec256"}, - {name: "VPMAXSD256", argLength: 2, reg: fp2fp1, asm: "VPMAXSD", commutative: true, typ: 
"Vec256"}, - {name: "VPMINSD256", argLength: 2, reg: fp2fp1, asm: "VPMINSD", commutative: true, typ: "Vec256"}, - {name: "VPMULLD256", argLength: 2, reg: fp2fp1, asm: "VPMULLD", commutative: true, typ: "Vec256"}, - {name: "VPHSUBD256", argLength: 2, reg: fp2fp1, asm: "VPHSUBD", commutative: false, typ: "Vec256"}, - {name: "VPOPCNTD256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec256"}, - {name: "VPSIGND256", argLength: 2, reg: fp2fp1, asm: "VPSIGND", commutative: false, typ: "Vec256"}, - {name: "VPSUBD256", argLength: 2, reg: fp2fp1, asm: "VPSUBD", commutative: false, typ: "Vec256"}, - {name: "VPABSQ128", argLength: 1, reg: fp1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128"}, - {name: "VPCMPEQQ128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQQ", commutative: true, typ: "Vec128"}, - {name: "VPCMPGTQ128", argLength: 2, reg: fp2m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, - {name: "VPABSQMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128"}, - {name: "VPANDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDQ", commutative: true, typ: "Vec128"}, - {name: "VPANDNQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec128"}, - {name: "VPCMPEQQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, - {name: "VPCMPGTQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, - {name: "VPMAXSQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec128"}, - {name: "VPMINSQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec128"}, - {name: "VPMULDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec128"}, - {name: "VPMULLQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec128"}, - {name: "VPSUBQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec128"}, - {name: "VPMAXSQ128", argLength: 2, reg: fp2fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec128"}, - {name: "VPMINSQ128", argLength: 2, reg: fp2fp1, asm: "VPMINSQ", commutative: true, typ: "Vec128"}, - {name: "VPMULDQ128", argLength: 2, reg: fp2fp1, asm: "VPMULDQ", commutative: true, typ: "Vec128"}, - {name: "VPMULLQ128", argLength: 2, reg: fp2fp1, asm: "VPMULLQ", commutative: true, typ: "Vec128"}, - {name: "VPOR128", argLength: 2, reg: fp2fp1, asm: "VPOR", commutative: true, typ: "Vec128"}, - {name: "VPABSQ256", argLength: 1, reg: fp1fp1, asm: "VPABSQ", commutative: false, typ: "Vec256"}, - {name: "VPADDQ256", argLength: 2, reg: fp2fp1, asm: "VPADDQ", commutative: true, typ: "Vec256"}, - {name: "VPCMPEQQ256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQQ", commutative: true, typ: "Vec256"}, - {name: "VPCMPGTQ256", argLength: 2, reg: fp2fp1, asm: "VPCMPGTQ", commutative: false, typ: "Vec256"}, - {name: "VPABSQMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSQ", commutative: false, typ: "Vec256"}, - {name: "VPANDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDQ", commutative: true, typ: "Vec256"}, - {name: "VPANDNQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec256"}, - {name: "VPCMPEQQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, - {name: "VPCMPGTQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, - {name: "VPMAXSQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSQ", commutative: true, typ: 
"Vec256"}, - {name: "VPMINSQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec256"}, - {name: "VPMULDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec256"}, - {name: "VPMULLQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec256"}, - {name: "VPORQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPORQ", commutative: true, typ: "Vec256"}, - {name: "VPOPCNTQMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec256"}, - {name: "VPSUBQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec256"}, - {name: "VPMAXSQ256", argLength: 2, reg: fp2fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec256"}, - {name: "VPMINSQ256", argLength: 2, reg: fp2fp1, asm: "VPMINSQ", commutative: true, typ: "Vec256"}, - {name: "VPMULDQ256", argLength: 2, reg: fp2fp1, asm: "VPMULDQ", commutative: true, typ: "Vec256"}, - {name: "VPMULLQ256", argLength: 2, reg: fp2fp1, asm: "VPMULLQ", commutative: true, typ: "Vec256"}, - {name: "VPOR256", argLength: 2, reg: fp2fp1, asm: "VPOR", commutative: true, typ: "Vec256"}, - {name: "VPOPCNTQ256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec256"}, - {name: "VPSUBQ256", argLength: 2, reg: fp2fp1, asm: "VPSUBQ", commutative: false, typ: "Vec256"}, - {name: "VPABSQ512", argLength: 1, reg: fp1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512"}, - {name: "VPANDQ512", argLength: 2, reg: fp2fp1, asm: "VPANDQ", commutative: true, typ: "Vec512"}, - {name: "VPCMPEQQ512", argLength: 2, reg: fp2m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, - {name: "VPCMPGTQ512", argLength: 2, reg: fp2m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, - {name: "VPABSQMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512"}, - {name: "VPADDQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDQ", commutative: true, typ: "Vec512"}, - {name: "VPANDNQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec512"}, - {name: "VPCMPEQQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPEQQ", commutative: true, typ: "Mask"}, - {name: "VPCMPGTQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPGTQ", commutative: false, typ: "Mask"}, - {name: "VPMAXSQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512"}, - {name: "VPMINSQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec512"}, - {name: "VPMULDQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec512"}, - {name: "VPMULLQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec512"}, - {name: "VPMAXSQ512", argLength: 2, reg: fp2fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512"}, - {name: "VPMINSQ512", argLength: 2, reg: fp2fp1, asm: "VPMINSQ", commutative: true, typ: "Vec512"}, - {name: "VPMULDQ512", argLength: 2, reg: fp2fp1, asm: "VPMULDQ", commutative: true, typ: "Vec512"}, - {name: "VPMULLQ512", argLength: 2, reg: fp2fp1, asm: "VPMULLQ", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTQ512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec512"}, - {name: "VPSUBQ512", argLength: 2, reg: fp2fp1, asm: "VPSUBQ", commutative: false, typ: "Vec512"}, - {name: "VPXORQ512", argLength: 2, reg: fp2fp1, asm: "VPXORQ", commutative: true, typ: "Vec512"}, - {name: "VPABSB128", argLength: 1, reg: fp1fp1, asm: "VPABSB", commutative: false, typ: "Vec128"}, - 
{name: "VPADDB128", argLength: 2, reg: fp2fp1, asm: "VPADDB", commutative: true, typ: "Vec128"}, - {name: "VPAND128", argLength: 2, reg: fp2fp1, asm: "VPAND", commutative: true, typ: "Vec128"}, - {name: "VPCMPEQB128", argLength: 2, reg: fp2fp1, asm: "VPCMPEQB", commutative: true, typ: "Vec128"}, - {name: "VPCMPGTB128", argLength: 2, reg: fp2fp1, asm: "VPCMPGTB", commutative: false, typ: "Vec128"}, - {name: "VPABSBMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPABSB", commutative: false, typ: "Vec128"}, - {name: "VPADDBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDB", commutative: true, typ: "Vec128"}, - {name: "VPMAXSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec128"}, - {name: "VPMINSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINSB", commutative: true, typ: "Vec128"}, - {name: "VPSUBSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec128"}, - {name: "VPMAXSB128", argLength: 2, reg: fp2fp1, asm: "VPMAXSB", commutative: true, typ: "Vec128"}, - {name: "VPMINSB128", argLength: 2, reg: fp2fp1, asm: "VPMINSB", commutative: true, typ: "Vec128"}, - {name: "VPSIGNB128", argLength: 2, reg: fp2fp1, asm: "VPSIGNB", commutative: false, typ: "Vec128"}, - {name: "VPSUBB128", argLength: 2, reg: fp2fp1, asm: "VPSUBB", commutative: false, typ: "Vec128"}, - {name: "VPABSB256", argLength: 1, reg: fp1fp1, asm: "VPABSB", commutative: false, typ: "Vec256"}, - {name: "VPADDB256", argLength: 2, reg: fp2fp1, asm: "VPADDB", commutative: true, typ: "Vec256"}, - {name: "VPANDN256", argLength: 2, reg: fp2fp1, asm: "VPANDN", commutative: true, typ: "Vec256"}, - {name: "VPCMPEQB256", argLength: 2, reg: fp2fp1, asm: "VPCMPEQB", commutative: true, typ: "Vec256"}, - {name: "VPCMPGTB256", argLength: 2, reg: fp2fp1, asm: "VPCMPGTB", commutative: false, typ: "Vec256"}, - {name: "VPABSBMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPABSB", commutative: false, typ: "Vec256"}, - {name: "VPMAXSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec256"}, - {name: "VPMINSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINSB", commutative: true, typ: "Vec256"}, - {name: "VPSUBSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec256"}, - {name: "VPMAXSB256", argLength: 2, reg: fp2fp1, asm: "VPMAXSB", commutative: true, typ: "Vec256"}, - {name: "VPMINSB256", argLength: 2, reg: fp2fp1, asm: "VPMINSB", commutative: true, typ: "Vec256"}, - {name: "VPOPCNTB256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec256"}, - {name: "VPSIGNB256", argLength: 2, reg: fp2fp1, asm: "VPSIGNB", commutative: false, typ: "Vec256"}, - {name: "VPABSB512", argLength: 1, reg: fp1fp1, asm: "VPABSB", commutative: false, typ: "Vec512"}, - {name: "VPABSBMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPABSB", commutative: false, typ: "Vec512"}, - {name: "VPMAXSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec512"}, - {name: "VPMINSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINSB", commutative: true, typ: "Vec512"}, - {name: "VPADDSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDSB", commutative: true, typ: "Vec512"}, - {name: "VPMAXSB512", argLength: 2, reg: fp2fp1, asm: "VPMAXSB", commutative: true, typ: "Vec512"}, - {name: "VPMINSB512", argLength: 2, reg: fp2fp1, asm: "VPMINSB", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTB512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec512"}, - {name: 
"VPSUBSB512", argLength: 2, reg: fp2fp1, asm: "VPSUBSB", commutative: false, typ: "Vec512"}, - {name: "VPSUBB512", argLength: 2, reg: fp2fp1, asm: "VPSUBB", commutative: false, typ: "Vec512"}, - {name: "VPAVGW256", argLength: 2, reg: fp2fp1, asm: "VPAVGW", commutative: true, typ: "Vec256"}, - {name: "VPAVGWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPAVGW", commutative: true, typ: "Vec256"}, - {name: "VPMAXUWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256"}, - {name: "VPMINUWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUW", commutative: true, typ: "Vec256"}, - {name: "VPMULHUWMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256"}, - {name: "VPOPCNTWMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec256"}, - {name: "VPMAXUW256", argLength: 2, reg: fp2fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256"}, - {name: "VPMINUW256", argLength: 2, reg: fp2fp1, asm: "VPMINUW", commutative: true, typ: "Vec256"}, - {name: "VPMULHUW256", argLength: 2, reg: fp2fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256"}, - {name: "VPHADDW256", argLength: 2, reg: fp2fp1, asm: "VPHADDW", commutative: false, typ: "Vec256"}, - {name: "VPOPCNTW256", argLength: 1, reg: fp1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec256"}, - {name: "VPADDSW256", argLength: 2, reg: fp2fp1, asm: "VPADDSW", commutative: true, typ: "Vec256"}, - {name: "VPAVGW512", argLength: 2, reg: fp2fp1, asm: "VPAVGW", commutative: true, typ: "Vec512"}, - {name: "VPADDWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDW", commutative: true, typ: "Vec512"}, - {name: "VPAVGWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPAVGW", commutative: true, typ: "Vec512"}, - {name: "VPMAXUWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512"}, - {name: "VPMINUWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUW", commutative: true, typ: "Vec512"}, - {name: "VPMULHUWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTWMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec512"}, - {name: "VPADDSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDSW", commutative: true, typ: "Vec512"}, - {name: "VPSUBSWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec512"}, - {name: "VPSUBWMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBW", commutative: false, typ: "Vec512"}, - {name: "VPMAXUW512", argLength: 2, reg: fp2fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512"}, - {name: "VPMINUW512", argLength: 2, reg: fp2fp1, asm: "VPMINUW", commutative: true, typ: "Vec512"}, - {name: "VPMULHUW512", argLength: 2, reg: fp2fp1, asm: "VPMULHUW", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTW512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec512"}, - {name: "VPADDSW512", argLength: 2, reg: fp2fp1, asm: "VPADDSW", commutative: true, typ: "Vec512"}, - {name: "VPSUBW512", argLength: 2, reg: fp2fp1, asm: "VPSUBW", commutative: false, typ: "Vec512"}, - {name: "VPAVGW128", argLength: 2, reg: fp2fp1, asm: "VPAVGW", commutative: true, typ: "Vec128"}, - {name: "VPADDWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDW", commutative: true, typ: "Vec128"}, - {name: "VPAVGWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPAVGW", commutative: true, typ: "Vec128"}, - {name: "VPMAXUWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUW", commutative: true, typ: 
"Vec128"}, - {name: "VPMINUWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUW", commutative: true, typ: "Vec128"}, - {name: "VPMULHUWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128"}, - {name: "VPADDSWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDSW", commutative: true, typ: "Vec128"}, - {name: "VPSUBWMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBW", commutative: false, typ: "Vec128"}, - {name: "VPMAXUW128", argLength: 2, reg: fp2fp1, asm: "VPMAXUW", commutative: true, typ: "Vec128"}, - {name: "VPMINUW128", argLength: 2, reg: fp2fp1, asm: "VPMINUW", commutative: true, typ: "Vec128"}, - {name: "VPMULHUW128", argLength: 2, reg: fp2fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128"}, - {name: "VPHADDW128", argLength: 2, reg: fp2fp1, asm: "VPHADDW", commutative: false, typ: "Vec128"}, - {name: "VPOPCNTW128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec128"}, - {name: "VPADDSW128", argLength: 2, reg: fp2fp1, asm: "VPADDSW", commutative: true, typ: "Vec128"}, - {name: "VPSUBSW128", argLength: 2, reg: fp2fp1, asm: "VPSUBSW", commutative: false, typ: "Vec128"}, - {name: "VPSUBW128", argLength: 2, reg: fp2fp1, asm: "VPSUBW", commutative: false, typ: "Vec128"}, - {name: "VPADDD512", argLength: 2, reg: fp2fp1, asm: "VPADDD", commutative: true, typ: "Vec512"}, - {name: "VPANDND512", argLength: 2, reg: fp2fp1, asm: "VPANDND", commutative: true, typ: "Vec512"}, - {name: "VPADDDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDD", commutative: true, typ: "Vec512"}, - {name: "VPANDDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDD", commutative: true, typ: "Vec512"}, - {name: "VPANDNDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDND", commutative: true, typ: "Vec512"}, - {name: "VPMAXUDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec512"}, - {name: "VPMINUDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUD", commutative: true, typ: "Vec512"}, - {name: "VPORDMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPORD", commutative: true, typ: "Vec512"}, - {name: "VPMAXUD512", argLength: 2, reg: fp2fp1, asm: "VPMAXUD", commutative: true, typ: "Vec512"}, - {name: "VPMINUD512", argLength: 2, reg: fp2fp1, asm: "VPMINUD", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTD512", argLength: 1, reg: fp1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec512"}, - {name: "VPSUBD512", argLength: 2, reg: fp2fp1, asm: "VPSUBD", commutative: false, typ: "Vec512"}, - {name: "VPADDD128", argLength: 2, reg: fp2fp1, asm: "VPADDD", commutative: true, typ: "Vec128"}, - {name: "VPADDDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDD", commutative: true, typ: "Vec128"}, - {name: "VPANDNDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPANDND", commutative: true, typ: "Vec128"}, - {name: "VPMAXUDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec128"}, - {name: "VPMINUDMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUD", commutative: true, typ: "Vec128"}, - {name: "VPMAXUD128", argLength: 2, reg: fp2fp1, asm: "VPMAXUD", commutative: true, typ: "Vec128"}, - {name: "VPMINUD128", argLength: 2, reg: fp2fp1, asm: "VPMINUD", commutative: true, typ: "Vec128"}, - {name: "VPHADDD128", argLength: 2, reg: fp2fp1, asm: "VPHADDD", commutative: false, typ: "Vec128"}, - {name: "VPOPCNTD128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec128"}, - {name: "VPADDD256", argLength: 2, reg: fp2fp1, asm: "VPADDD", commutative: true, typ: "Vec256"}, 
- {name: "VPADDDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDD", commutative: true, typ: "Vec256"}, - {name: "VPANDDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDD", commutative: true, typ: "Vec256"}, - {name: "VPANDNDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPANDND", commutative: true, typ: "Vec256"}, - {name: "VPMAXUDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec256"}, - {name: "VPMINUDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUD", commutative: true, typ: "Vec256"}, - {name: "VPOPCNTDMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec256"}, - {name: "VPXORDMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPXORD", commutative: true, typ: "Vec256"}, - {name: "VPMAXUD256", argLength: 2, reg: fp2fp1, asm: "VPMAXUD", commutative: true, typ: "Vec256"}, - {name: "VPMINUD256", argLength: 2, reg: fp2fp1, asm: "VPMINUD", commutative: true, typ: "Vec256"}, - {name: "VPMULUDQ256", argLength: 2, reg: fp2fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec256"}, - {name: "VPHADDD256", argLength: 2, reg: fp2fp1, asm: "VPHADDD", commutative: false, typ: "Vec256"}, - {name: "VPXOR256", argLength: 2, reg: fp2fp1, asm: "VPXOR", commutative: true, typ: "Vec256"}, - {name: "VPADDQ128", argLength: 2, reg: fp2fp1, asm: "VPADDQ", commutative: true, typ: "Vec128"}, - {name: "VPADDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDQ", commutative: true, typ: "Vec128"}, - {name: "VPMAXUQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec128"}, - {name: "VPMINUQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec128"}, - {name: "VPMULUDQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec128"}, - {name: "VPORQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPORQ", commutative: true, typ: "Vec128"}, - {name: "VPOPCNTQMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec128"}, - {name: "VPXORQMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPXORQ", commutative: true, typ: "Vec128"}, - {name: "VPMAXUQ128", argLength: 2, reg: fp2fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec128"}, - {name: "VPMINUQ128", argLength: 2, reg: fp2fp1, asm: "VPMINUQ", commutative: true, typ: "Vec128"}, - {name: "VPMULUDQ128", argLength: 2, reg: fp2fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec128"}, - {name: "VPOPCNTQ128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec128"}, - {name: "VPSUBQ128", argLength: 2, reg: fp2fp1, asm: "VPSUBQ", commutative: false, typ: "Vec128"}, - {name: "VPXOR128", argLength: 2, reg: fp2fp1, asm: "VPXOR", commutative: true, typ: "Vec128"}, - {name: "VPADDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDQ", commutative: true, typ: "Vec256"}, - {name: "VPMAXUQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec256"}, - {name: "VPMINUQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec256"}, - {name: "VPMULUDQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec256"}, - {name: "VPXORQMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPXORQ", commutative: true, typ: "Vec256"}, - {name: "VPMAXUQ256", argLength: 2, reg: fp2fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec256"}, - {name: "VPMINUQ256", argLength: 2, reg: fp2fp1, asm: "VPMINUQ", commutative: true, typ: "Vec256"}, - {name: "VPADDQ512", argLength: 2, reg: fp2fp1, asm: "VPADDQ", commutative: 
true, typ: "Vec512"}, - {name: "VPANDNQ512", argLength: 2, reg: fp2fp1, asm: "VPANDNQ", commutative: true, typ: "Vec512"}, - {name: "VPANDQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPANDQ", commutative: true, typ: "Vec512"}, - {name: "VPMAXUQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec512"}, - {name: "VPMINUQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec512"}, - {name: "VPMULUDQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec512"}, - {name: "VPORQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPORQ", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTQMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec512"}, - {name: "VPSUBQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec512"}, - {name: "VPXORQMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPXORQ", commutative: true, typ: "Vec512"}, - {name: "VPMAXUQ512", argLength: 2, reg: fp2fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec512"}, - {name: "VPMINUQ512", argLength: 2, reg: fp2fp1, asm: "VPMINUQ", commutative: true, typ: "Vec512"}, - {name: "VPMULUDQ512", argLength: 2, reg: fp2fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec512"}, - {name: "VPORQ512", argLength: 2, reg: fp2fp1, asm: "VPORQ", commutative: true, typ: "Vec512"}, - {name: "VPANDN128", argLength: 2, reg: fp2fp1, asm: "VPANDN", commutative: true, typ: "Vec128"}, - {name: "VPAVGB128", argLength: 2, reg: fp2fp1, asm: "VPAVGB", commutative: true, typ: "Vec128"}, - {name: "VPAVGBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPAVGB", commutative: true, typ: "Vec128"}, - {name: "VPMAXUBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec128"}, - {name: "VPMINUBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPMINUB", commutative: true, typ: "Vec128"}, - {name: "VPOPCNTBMasked128", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec128"}, - {name: "VPADDSBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPADDSB", commutative: true, typ: "Vec128"}, - {name: "VPSUBBMasked128", argLength: 3, reg: fp2m1fp1, asm: "VPSUBB", commutative: false, typ: "Vec128"}, - {name: "VPMAXUB128", argLength: 2, reg: fp2fp1, asm: "VPMAXUB", commutative: true, typ: "Vec128"}, - {name: "VPMINUB128", argLength: 2, reg: fp2fp1, asm: "VPMINUB", commutative: true, typ: "Vec128"}, - {name: "VPOPCNTB128", argLength: 1, reg: fp1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec128"}, - {name: "VPADDSB128", argLength: 2, reg: fp2fp1, asm: "VPADDSB", commutative: true, typ: "Vec128"}, - {name: "VPSUBSB128", argLength: 2, reg: fp2fp1, asm: "VPSUBSB", commutative: false, typ: "Vec128"}, - {name: "VPAVGB256", argLength: 2, reg: fp2fp1, asm: "VPAVGB", commutative: true, typ: "Vec256"}, - {name: "VPADDBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDB", commutative: true, typ: "Vec256"}, - {name: "VPAVGBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPAVGB", commutative: true, typ: "Vec256"}, - {name: "VPMAXUBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256"}, - {name: "VPMINUBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPMINUB", commutative: true, typ: "Vec256"}, - {name: "VPOPCNTBMasked256", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec256"}, - {name: "VPADDSBMasked256", argLength: 3, reg: fp2m1fp1, asm: "VPADDSB", commutative: true, typ: "Vec256"}, - {name: "VPSUBBMasked256", argLength: 3, reg: 
fp2m1fp1, asm: "VPSUBB", commutative: false, typ: "Vec256"}, - {name: "VPMAXUB256", argLength: 2, reg: fp2fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256"}, - {name: "VPMINUB256", argLength: 2, reg: fp2fp1, asm: "VPMINUB", commutative: true, typ: "Vec256"}, - {name: "VPADDSB256", argLength: 2, reg: fp2fp1, asm: "VPADDSB", commutative: true, typ: "Vec256"}, - {name: "VPSUBSB256", argLength: 2, reg: fp2fp1, asm: "VPSUBSB", commutative: false, typ: "Vec256"}, - {name: "VPSUBB256", argLength: 2, reg: fp2fp1, asm: "VPSUBB", commutative: false, typ: "Vec256"}, - {name: "VPADDB512", argLength: 2, reg: fp2fp1, asm: "VPADDB", commutative: true, typ: "Vec512"}, - {name: "VPAVGB512", argLength: 2, reg: fp2fp1, asm: "VPAVGB", commutative: true, typ: "Vec512"}, - {name: "VPADDBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPADDB", commutative: true, typ: "Vec512"}, - {name: "VPAVGBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPAVGB", commutative: true, typ: "Vec512"}, - {name: "VPMAXUBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512"}, - {name: "VPMINUBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPMINUB", commutative: true, typ: "Vec512"}, - {name: "VPOPCNTBMasked512", argLength: 2, reg: fp1m1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec512"}, - {name: "VPSUBSBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec512"}, - {name: "VPSUBBMasked512", argLength: 3, reg: fp2m1fp1, asm: "VPSUBB", commutative: false, typ: "Vec512"}, - {name: "VPMAXUB512", argLength: 2, reg: fp2fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512"}, - {name: "VPMINUB512", argLength: 2, reg: fp2fp1, asm: "VPMINUB", commutative: true, typ: "Vec512"}, - {name: "VPADDSB512", argLength: 2, reg: fp2fp1, asm: "VPADDSB", commutative: true, typ: "Vec512"}, - {name: "VCMPPS512", argLength: 2, reg: fp2m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VCMPPSMasked512", argLength: 3, reg: fp2m1m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VCMPPS128", argLength: 2, reg: fp2fp1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Vec128"}, - {name: "VCMPPSMasked128", argLength: 3, reg: fp2m1m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VCMPPS256", argLength: 2, reg: fp2fp1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Vec256"}, - {name: "VCMPPSMasked256", argLength: 3, reg: fp2m1m1, asm: "VCMPPS", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VCMPPD128", argLength: 2, reg: fp2fp1, asm: "VCMPPD", aux: "Int8", commutative: false, typ: "Vec128"}, - {name: "VCMPPDMasked128", argLength: 3, reg: fp2m1m1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VCMPPD256", argLength: 2, reg: fp2fp1, asm: "VCMPPD", aux: "Int8", commutative: false, typ: "Vec256"}, - {name: "VCMPPDMasked256", argLength: 3, reg: fp2m1m1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VCMPPD512", argLength: 2, reg: fp2m1, asm: "VCMPPD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VCMPPDMasked512", argLength: 3, reg: fp2m1m1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPW256", argLength: 2, reg: fp2m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, - 
{name: "VPCMPW512", argLength: 2, reg: fp2m1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPW128", argLength: 2, reg: fp2m1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPW", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPD512", argLength: 2, reg: fp2m1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPDMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPDMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPD128", argLength: 2, reg: fp2m1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPD256", argLength: 2, reg: fp2m1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPDMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPQ128", argLength: 2, reg: fp2m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPQ256", argLength: 2, reg: fp2m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPQ512", argLength: 2, reg: fp2m1, asm: "VPCMPQ", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPBMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPB128", argLength: 2, reg: fp2m1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPBMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPB256", argLength: 2, reg: fp2m1, asm: "VPCMPB", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPB512", argLength: 2, reg: fp2m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPBMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUW256", argLength: 2, reg: fp2m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUWMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUW512", argLength: 2, reg: fp2m1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPUWMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUW128", argLength: 2, reg: fp2m1, asm: "VPCMPUW", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUWMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPUDMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUD512", argLength: 2, reg: fp2m1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPUD128", argLength: 2, reg: fp2m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUDMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: 
"VPCMPUDMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUD", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUD256", argLength: 2, reg: fp2m1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPUQ128", argLength: 2, reg: fp2m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUQMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUQMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUQ256", argLength: 2, reg: fp2m1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask"}, - {name: "VPCMPUQ512", argLength: 2, reg: fp2m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUQMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUQ", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUB128", argLength: 2, reg: fp2m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUBMasked128", argLength: 3, reg: fp2m1m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUB256", argLength: 2, reg: fp2m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUBMasked256", argLength: 3, reg: fp2m1m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUB512", argLength: 2, reg: fp2m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, - {name: "VPCMPUBMasked512", argLength: 3, reg: fp2m1m1, asm: "VPCMPUB", aux: "Int8", commutative: false, typ: "Mask"}, + {name: "VADDPS512", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDPS512", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPS512", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PS512", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PS512", argLength: 1, reg: fp11, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPS512", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VADDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPSMasked512", argLength: 3, 
reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VORPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPSMasked512", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VXORPSMasked512", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPS512", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPS512", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPS512", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPS512", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VORPS512", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPS512", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VXORPS512", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VADDPS128", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDPS128", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPS128", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PS128", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRTPS128", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPS128", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VADDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VORPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPSMasked128", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, 
typ: "Vec128", resultInArg0: false}, + {name: "VXORPSMasked128", argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPS128", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPS128", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPS128", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPS128", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VORPS128", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VHADDPS128", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VHSUBPS128", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPS128", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VXORPS128", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPS256", argLength: 2, reg: fp21, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDPS256", argLength: 2, reg: fp21, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPS256", argLength: 2, reg: fp21, asm: "VANDNPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PS256", argLength: 1, reg: fp11, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRTPS256", argLength: 1, reg: fp11, asm: "VRSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPS256", argLength: 2, reg: fp21, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VADDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VORPSMasked256", argLength: 3, reg: fp2k1fp1, asm: "VORPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPSMasked256", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VXORPSMasked256", 
argLength: 3, reg: fp2k1fp1, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPS256", argLength: 2, reg: fp21, asm: "VMAXPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPS256", argLength: 2, reg: fp21, asm: "VMINPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPS256", argLength: 2, reg: fp21, asm: "VMULPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPS256", argLength: 2, reg: fp21, asm: "VSCALEFPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VORPS256", argLength: 2, reg: fp21, asm: "VORPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VHADDPS256", argLength: 2, reg: fp21, asm: "VHADDPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VHSUBPS256", argLength: 2, reg: fp21, asm: "VHSUBPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPS256", argLength: 1, reg: fp11, asm: "VSQRTPS", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VXORPS256", argLength: 2, reg: fp21, asm: "VXORPS", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPD128", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDPD128", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPD128", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PD128", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PD128", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPD128", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VADDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VANDNPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VRCP14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VRSQRT14PDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VDIVPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VMAXPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VORPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VXORPDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, 
typ: "Vec128", resultInArg0: false}, + {name: "VMAXPD128", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMINPD128", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VMULPD128", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VSCALEFPD128", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VORPD128", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VHADDPD128", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VHSUBPD128", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VSQRTPD128", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VXORPD128", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VADDPD256", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDPD256", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPD256", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PD256", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PD256", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPD256", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VADDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VANDNPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VRCP14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VRSQRT14PDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VDIVPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VORPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VXORPDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMAXPD256", 
argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMINPD256", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VMULPD256", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VSCALEFPD256", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VORPD256", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VHADDPD256", argLength: 2, reg: fp21, asm: "VHADDPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VHSUBPD256", argLength: 2, reg: fp21, asm: "VHSUBPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VSQRTPD256", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VXORPD256", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VADDPD512", argLength: 2, reg: fp21, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDPD512", argLength: 2, reg: fp21, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPD512", argLength: 2, reg: fp21, asm: "VANDNPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PD512", argLength: 1, reg: fp11, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PD512", argLength: 1, reg: fp11, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPD512", argLength: 2, reg: fp21, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VADDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VADDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VANDNPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VANDNPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VRCP14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRCP14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VRSQRT14PDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VRSQRT14PD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VDIVPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VDIVPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMAXPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMINPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VORPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VXORPDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMAXPD512", argLength: 2, reg: fp21, asm: "VMAXPD", commutative: true, typ: 
"Vec512", resultInArg0: false}, + {name: "VMINPD512", argLength: 2, reg: fp21, asm: "VMINPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VMULPD512", argLength: 2, reg: fp21, asm: "VMULPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSCALEFPD512", argLength: 2, reg: fp21, asm: "VSCALEFPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VORPD512", argLength: 2, reg: fp21, asm: "VORPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VSQRTPD512", argLength: 1, reg: fp11, asm: "VSQRTPD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VXORPD512", argLength: 2, reg: fp21, asm: "VXORPD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPABSW256", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDW256", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAND256", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDN256", argLength: 2, reg: fp21, asm: "VPANDN", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQW256", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTW256", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSWMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTWMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSW256", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSW256", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHW256", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLW256", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOR256", argLength: 2, reg: fp21, asm: 
"VPOR", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPHADDW256", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBW256", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTW256", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSW256", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPHADDSW256", argLength: 2, reg: fp21, asm: "VPHADDSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBSW256", argLength: 2, reg: fp21, asm: "VPHSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSW256", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGNW256", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBW256", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXOR256", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPABSW512", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDW512", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQW512", argLength: 2, reg: fp2k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTW512", argLength: 2, reg: fp2k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPABSWMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTWMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSW512", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSW512", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec512", resultInArg0: false}, + 
{name: "VPMULHW512", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLW512", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTW512", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSW512", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSW512", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBW512", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPABSW128", argLength: 1, reg: fp11, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDW128", argLength: 2, reg: fp21, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAND128", argLength: 2, reg: fp21, asm: "VPAND", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDN128", argLength: 2, reg: fp21, asm: "VPANDN", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQW128", argLength: 2, reg: fp21, asm: "VPCMPEQW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTW128", argLength: 2, reg: fp21, asm: "VPCMPGTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSWMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQW", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTW", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTWMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSW128", argLength: 2, reg: fp21, asm: "VPMAXSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSW128", argLength: 2, reg: fp21, asm: "VPMINSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHW128", argLength: 2, reg: fp21, asm: "VPMULHW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLW128", argLength: 2, reg: fp21, asm: "VPMULLW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOR128", argLength: 2, reg: fp21, asm: "VPOR", commutative: true, 
typ: "Vec128", resultInArg0: false}, + {name: "VPHADDW128", argLength: 2, reg: fp21, asm: "VPHADDW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBW128", argLength: 2, reg: fp21, asm: "VPHSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTW128", argLength: 1, reg: fp11, asm: "VPOPCNTW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSW128", argLength: 2, reg: fp21, asm: "VPADDSW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPHADDSW128", argLength: 2, reg: fp21, asm: "VPHADDSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBSW128", argLength: 2, reg: fp21, asm: "VPHSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSW128", argLength: 2, reg: fp21, asm: "VPSUBSW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGNW128", argLength: 2, reg: fp21, asm: "VPSIGNW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBW128", argLength: 2, reg: fp21, asm: "VPSUBW", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXOR128", argLength: 2, reg: fp21, asm: "VPXOR", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPABSD512", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDD512", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDD512", argLength: 2, reg: fp21, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDND512", argLength: 2, reg: fp21, asm: "VPANDND", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQD512", argLength: 2, reg: fp2k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTD512", argLength: 2, reg: fp2k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPABSDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTDMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXORDMasked512", argLength: 
3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSD512", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSD512", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLD512", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORD512", argLength: 2, reg: fp21, asm: "VPORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTD512", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBD512", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXORD512", argLength: 2, reg: fp21, asm: "VPXORD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPABSD128", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDD128", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQD128", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTD128", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTDMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXORDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSD128", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSD128", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULDQ128", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLD128", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec128", 
resultInArg0: false}, + {name: "VPHADDD128", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPHSUBD128", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTD128", argLength: 1, reg: fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGND128", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBD128", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSD256", argLength: 1, reg: fp11, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDD256", argLength: 2, reg: fp21, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQD256", argLength: 2, reg: fp21, asm: "VPCMPEQD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTD256", argLength: 2, reg: fp21, asm: "VPCMPGTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDND", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQD", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTD", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTDMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXORDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPXORD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSD256", argLength: 2, reg: fp21, asm: "VPMAXSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSD256", argLength: 2, reg: fp21, asm: "VPMINSD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULDQ256", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLD256", argLength: 2, reg: fp21, asm: "VPMULLD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPHADDD256", argLength: 2, reg: fp21, asm: "VPHADDD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPHSUBD256", argLength: 2, reg: fp21, asm: "VPHSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTD256", argLength: 1, reg: 
fp11, asm: "VPOPCNTD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGND256", argLength: 2, reg: fp21, asm: "VPSIGND", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBD256", argLength: 2, reg: fp21, asm: "VPSUBD", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQ128", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDQ128", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQQ128", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTQ128", argLength: 2, reg: fp2k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPABSQMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPANDNQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPORQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPXORQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPXORQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSQ128", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSQ128", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULLQ128", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTQ128", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBQ128", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSQ256", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQ256", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQQ256", argLength: 2, reg: fp21, asm: "VPCMPEQQ", commutative: true, typ: "Vec256", 
resultInArg0: false}, + {name: "VPCMPGTQ256", argLength: 2, reg: fp21, asm: "VPCMPGTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPANDNQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPORQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPXORQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPXORQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSQ256", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSQ256", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULLQ256", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTQ256", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBQ256", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSQ512", argLength: 1, reg: fp11, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQ512", argLength: 2, reg: fp21, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDQ512", argLength: 2, reg: fp21, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQ512", argLength: 2, reg: fp21, asm: "VPANDNQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQQ512", argLength: 2, reg: fp2k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTQ512", argLength: 2, reg: fp2k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPABSQMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: 
"VPANDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPANDNQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPANDNQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQQ", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTQ", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTQMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSQ512", argLength: 2, reg: fp21, asm: "VPMAXSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSQ512", argLength: 2, reg: fp21, asm: "VPMINSQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULDQ512", argLength: 2, reg: fp21, asm: "VPMULDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULLQ512", argLength: 2, reg: fp21, asm: "VPMULLQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPORQ512", argLength: 2, reg: fp21, asm: "VPORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTQ512", argLength: 1, reg: fp11, asm: "VPOPCNTQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBQ512", argLength: 2, reg: fp21, asm: "VPSUBQ", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPXORQ512", argLength: 2, reg: fp21, asm: "VPXORQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPABSB128", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDB128", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQB128", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPGTB128", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSBMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPCMPEQBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSBMasked128", argLength: 3, reg: 
fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTBMasked128", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXSB128", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINSB128", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPOPCNTB128", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPADDSB128", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBSB128", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSIGNB128", argLength: 2, reg: fp21, asm: "VPSIGNB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPSUBB128", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec128", resultInArg0: false}, + {name: "VPABSB256", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDB256", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQB256", argLength: 2, reg: fp21, asm: "VPCMPEQB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPGTB256", argLength: 2, reg: fp21, asm: "VPCMPGTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSBMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPCMPEQBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTBMasked256", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXSB256", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINSB256", argLength: 2, reg: fp21, asm: "VPMINSB", 
commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPOPCNTB256", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPADDSB256", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBSB256", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSIGNB256", argLength: 2, reg: fp21, asm: "VPSIGNB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPSUBB256", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec256", resultInArg0: false}, + {name: "VPABSB512", argLength: 1, reg: fp11, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDB512", argLength: 2, reg: fp21, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQB512", argLength: 2, reg: fp2k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTB512", argLength: 2, reg: fp2k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPABSBMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPABSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPCMPEQBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPEQB", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPGTBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPGTB", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPMAXSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTBMasked512", argLength: 2, reg: fp1k1fp1, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXSB512", argLength: 2, reg: fp21, asm: "VPMAXSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINSB512", argLength: 2, reg: fp21, asm: "VPMINSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPOPCNTB512", argLength: 1, reg: fp11, asm: "VPOPCNTB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPADDSB512", argLength: 2, reg: fp21, asm: "VPADDSB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBSB512", argLength: 2, reg: fp21, asm: "VPSUBSB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPSUBB512", argLength: 2, reg: fp21, asm: "VPSUBB", commutative: false, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGW256", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: 
"VPMINUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHUWMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUW256", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUW256", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULHUW256", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGW512", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUWMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUW512", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUW512", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULHUW512", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGW128", argLength: 2, reg: fp21, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPAVGW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHUWMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUW128", argLength: 2, reg: fp21, asm: "VPMAXUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUW128", argLength: 2, reg: fp21, asm: "VPMINUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULHUW128", argLength: 2, reg: fp21, asm: "VPMULHUW", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUDMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUD512", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUD512", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUDMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUD128", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUD128", argLength: 2, reg: fp21, asm: 
"VPMINUD", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULUDQ128", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUDMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUD256", argLength: 2, reg: fp21, asm: "VPMAXUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUD256", argLength: 2, reg: fp21, asm: "VPMINUD", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULUDQ256", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMULUDQMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQ128", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUQ128", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMULUDQMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQ256", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUQ256", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUQ512", argLength: 2, reg: fp21, asm: "VPMAXUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUQ512", argLength: 2, reg: fp21, asm: "VPMINUQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMULUDQ512", argLength: 2, reg: fp21, asm: "VPMULUDQ", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGB128", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPAVGBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUBMasked128", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMAXUB128", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VPMINUB128", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec128", 
resultInArg0: false}, + {name: "VPAVGB256", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUBMasked256", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMAXUB256", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPMINUB256", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VPAVGB512", argLength: 2, reg: fp21, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPAVGBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPAVGB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUBMasked512", argLength: 3, reg: fp2k1fp1, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMAXUB512", argLength: 2, reg: fp21, asm: "VPMAXUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VPMINUB512", argLength: 2, reg: fp21, asm: "VPMINUB", commutative: true, typ: "Vec512", resultInArg0: false}, + {name: "VCMPPS512", argLength: 2, reg: fp2k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPSMasked512", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPS128", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPSMasked128", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPS256", argLength: 2, reg: fp21, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPSMasked256", argLength: 3, reg: fp2k1k1, asm: "VCMPPS", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPD128", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec128", resultInArg0: false}, + {name: "VCMPPDMasked128", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPD256", argLength: 2, reg: fp21, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Vec256", resultInArg0: false}, + {name: "VCMPPDMasked256", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPD512", argLength: 2, reg: fp2k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VCMPPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VCMPPD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW256", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW512", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked512", 
argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPW128", argLength: 2, reg: fp2k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPWMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPW", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD512", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD128", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPD256", argLength: 2, reg: fp2k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPD", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ128", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ256", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQ512", argLength: 2, reg: fp2k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPQ", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB128", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB256", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPB512", argLength: 2, reg: fp2k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPB", aux: "Int8", commutative: false, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW256", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW512", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUW128", argLength: 2, reg: fp2k1, asm: "VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUWMasked128", argLength: 3, reg: fp2k1k1, asm: 
"VPCMPUW", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD512", argLength: 2, reg: fp2k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD128", argLength: 2, reg: fp2k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUD256", argLength: 2, reg: fp2k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUDMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUD", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ128", argLength: 2, reg: fp2k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ256", argLength: 2, reg: fp2k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQ512", argLength: 2, reg: fp2k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUQMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUQ", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB128", argLength: 2, reg: fp2k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked128", argLength: 3, reg: fp2k1k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB256", argLength: 2, reg: fp2k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked256", argLength: 3, reg: fp2k1k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUB512", argLength: 2, reg: fp2k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, + {name: "VPCMPUBMasked512", argLength: 3, reg: fp2k1k1, asm: "VPCMPUB", aux: "Int8", commutative: true, typ: "Mask", resultInArg0: false}, } } diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 97a4a48253..c7abca814e 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1202,6 +1202,7 @@ const ( OpAMD64VRCP14PS512 OpAMD64VRSQRT14PS512 OpAMD64VDIVPS512 + OpAMD64VADDPSMasked512 OpAMD64VANDPSMasked512 OpAMD64VANDNPSMasked512 OpAMD64VRCP14PSMasked512 @@ -1213,7 +1214,6 @@ const ( OpAMD64VSCALEFPSMasked512 OpAMD64VORPSMasked512 OpAMD64VSQRTPSMasked512 - OpAMD64VADDPSMasked512 OpAMD64VXORPSMasked512 OpAMD64VMAXPS512 OpAMD64VMINPS512 @@ -1222,6 +1222,7 @@ const ( OpAMD64VORPS512 OpAMD64VSQRTPS512 OpAMD64VXORPS512 + OpAMD64VADDPS128 OpAMD64VANDPS128 OpAMD64VANDNPS128 OpAMD64VRCP14PS128 @@ -1248,7 +1249,6 @@ const ( OpAMD64VHADDPS128 OpAMD64VHSUBPS128 OpAMD64VSQRTPS128 - OpAMD64VADDPS128 OpAMD64VXORPS128 OpAMD64VADDPS256 OpAMD64VANDPS256 @@ -1256,6 +1256,7 @@ const ( OpAMD64VRCP14PS256 OpAMD64VRSQRTPS256 OpAMD64VDIVPS256 + OpAMD64VADDPSMasked256 OpAMD64VANDPSMasked256 
OpAMD64VANDNPSMasked256 OpAMD64VRCP14PSMasked256 @@ -1267,7 +1268,6 @@ const ( OpAMD64VSCALEFPSMasked256 OpAMD64VORPSMasked256 OpAMD64VSQRTPSMasked256 - OpAMD64VADDPSMasked256 OpAMD64VXORPSMasked256 OpAMD64VMAXPS256 OpAMD64VMINPS256 @@ -1312,6 +1312,7 @@ const ( OpAMD64VRCP14PD256 OpAMD64VRSQRT14PD256 OpAMD64VDIVPD256 + OpAMD64VADDPDMasked256 OpAMD64VANDPDMasked256 OpAMD64VANDNPDMasked256 OpAMD64VRCP14PDMasked256 @@ -1323,7 +1324,6 @@ const ( OpAMD64VSCALEFPDMasked256 OpAMD64VORPDMasked256 OpAMD64VSQRTPDMasked256 - OpAMD64VADDPDMasked256 OpAMD64VXORPDMasked256 OpAMD64VMAXPD256 OpAMD64VMINPD256 @@ -1334,11 +1334,13 @@ const ( OpAMD64VHSUBPD256 OpAMD64VSQRTPD256 OpAMD64VXORPD256 + OpAMD64VADDPD512 OpAMD64VANDPD512 OpAMD64VANDNPD512 OpAMD64VRCP14PD512 OpAMD64VRSQRT14PD512 OpAMD64VDIVPD512 + OpAMD64VADDPDMasked512 OpAMD64VANDPDMasked512 OpAMD64VANDNPDMasked512 OpAMD64VRCP14PDMasked512 @@ -1350,7 +1352,6 @@ const ( OpAMD64VSCALEFPDMasked512 OpAMD64VORPDMasked512 OpAMD64VSQRTPDMasked512 - OpAMD64VADDPDMasked512 OpAMD64VXORPDMasked512 OpAMD64VMAXPD512 OpAMD64VMINPD512 @@ -1358,10 +1359,11 @@ const ( OpAMD64VSCALEFPD512 OpAMD64VORPD512 OpAMD64VSQRTPD512 - OpAMD64VADDPD512 OpAMD64VXORPD512 OpAMD64VPABSW256 OpAMD64VPADDW256 + OpAMD64VPAND256 + OpAMD64VPANDN256 OpAMD64VPCMPEQW256 OpAMD64VPCMPGTW256 OpAMD64VPABSWMasked256 @@ -1372,6 +1374,7 @@ const ( OpAMD64VPMINSWMasked256 OpAMD64VPMULHWMasked256 OpAMD64VPMULLWMasked256 + OpAMD64VPOPCNTWMasked256 OpAMD64VPADDSWMasked256 OpAMD64VPSUBSWMasked256 OpAMD64VPSUBWMasked256 @@ -1379,33 +1382,49 @@ const ( OpAMD64VPMINSW256 OpAMD64VPMULHW256 OpAMD64VPMULLW256 + OpAMD64VPOR256 + OpAMD64VPHADDW256 OpAMD64VPHSUBW256 + OpAMD64VPOPCNTW256 + OpAMD64VPADDSW256 OpAMD64VPHADDSW256 OpAMD64VPHSUBSW256 OpAMD64VPSUBSW256 OpAMD64VPSIGNW256 OpAMD64VPSUBW256 + OpAMD64VPXOR256 OpAMD64VPABSW512 OpAMD64VPADDW512 OpAMD64VPCMPEQW512 OpAMD64VPCMPGTW512 OpAMD64VPABSWMasked512 + OpAMD64VPADDWMasked512 OpAMD64VPCMPEQWMasked512 OpAMD64VPCMPGTWMasked512 OpAMD64VPMAXSWMasked512 OpAMD64VPMINSWMasked512 OpAMD64VPMULHWMasked512 OpAMD64VPMULLWMasked512 + OpAMD64VPOPCNTWMasked512 + OpAMD64VPADDSWMasked512 + OpAMD64VPSUBSWMasked512 + OpAMD64VPSUBWMasked512 OpAMD64VPMAXSW512 OpAMD64VPMINSW512 OpAMD64VPMULHW512 OpAMD64VPMULLW512 + OpAMD64VPOPCNTW512 + OpAMD64VPADDSW512 OpAMD64VPSUBSW512 + OpAMD64VPSUBW512 OpAMD64VPABSW128 OpAMD64VPADDW128 + OpAMD64VPAND128 + OpAMD64VPANDN128 OpAMD64VPCMPEQW128 OpAMD64VPCMPGTW128 OpAMD64VPABSWMasked128 + OpAMD64VPADDWMasked128 OpAMD64VPCMPEQWMasked128 OpAMD64VPCMPGTWMasked128 OpAMD64VPMAXSWMasked128 @@ -1413,21 +1432,40 @@ const ( OpAMD64VPMULHWMasked128 OpAMD64VPMULLWMasked128 OpAMD64VPOPCNTWMasked128 + OpAMD64VPADDSWMasked128 OpAMD64VPSUBSWMasked128 + OpAMD64VPSUBWMasked128 OpAMD64VPMAXSW128 OpAMD64VPMINSW128 OpAMD64VPMULHW128 OpAMD64VPMULLW128 + OpAMD64VPOR128 + OpAMD64VPHADDW128 OpAMD64VPHSUBW128 + OpAMD64VPOPCNTW128 + OpAMD64VPADDSW128 OpAMD64VPHADDSW128 OpAMD64VPHSUBSW128 + OpAMD64VPSUBSW128 OpAMD64VPSIGNW128 + OpAMD64VPSUBW128 + OpAMD64VPXOR128 OpAMD64VPABSD512 + OpAMD64VPADDD512 OpAMD64VPANDD512 + OpAMD64VPANDND512 + OpAMD64VPCMPEQD512 + OpAMD64VPCMPGTD512 OpAMD64VPABSDMasked512 + OpAMD64VPADDDMasked512 + OpAMD64VPANDDMasked512 + OpAMD64VPANDNDMasked512 + OpAMD64VPCMPEQDMasked512 + OpAMD64VPCMPGTDMasked512 OpAMD64VPMAXSDMasked512 OpAMD64VPMINSDMasked512 OpAMD64VPMULLDMasked512 + OpAMD64VPORDMasked512 OpAMD64VPOPCNTDMasked512 OpAMD64VPSUBDMasked512 OpAMD64VPXORDMasked512 @@ -1435,12 +1473,19 @@ const ( OpAMD64VPMINSD512 OpAMD64VPMULLD512 OpAMD64VPORD512 
+ OpAMD64VPOPCNTD512 + OpAMD64VPSUBD512 OpAMD64VPXORD512 OpAMD64VPABSD128 + OpAMD64VPADDD128 OpAMD64VPCMPEQD128 OpAMD64VPCMPGTD128 OpAMD64VPABSDMasked128 + OpAMD64VPADDDMasked128 OpAMD64VPANDDMasked128 + OpAMD64VPANDNDMasked128 + OpAMD64VPCMPEQDMasked128 + OpAMD64VPCMPGTDMasked128 OpAMD64VPMAXSDMasked128 OpAMD64VPMINSDMasked128 OpAMD64VPMULLDMasked128 @@ -1450,31 +1495,45 @@ const ( OpAMD64VPXORDMasked128 OpAMD64VPMAXSD128 OpAMD64VPMINSD128 + OpAMD64VPMULDQ128 OpAMD64VPMULLD128 + OpAMD64VPHADDD128 OpAMD64VPHSUBD128 + OpAMD64VPOPCNTD128 OpAMD64VPSIGND128 OpAMD64VPSUBD128 OpAMD64VPABSD256 - OpAMD64VPAND256 + OpAMD64VPADDD256 OpAMD64VPCMPEQD256 OpAMD64VPCMPGTD256 OpAMD64VPABSDMasked256 + OpAMD64VPADDDMasked256 + OpAMD64VPANDDMasked256 + OpAMD64VPANDNDMasked256 + OpAMD64VPCMPEQDMasked256 + OpAMD64VPCMPGTDMasked256 OpAMD64VPMAXSDMasked256 OpAMD64VPMINSDMasked256 OpAMD64VPMULLDMasked256 OpAMD64VPORDMasked256 + OpAMD64VPOPCNTDMasked256 OpAMD64VPSUBDMasked256 + OpAMD64VPXORDMasked256 OpAMD64VPMAXSD256 OpAMD64VPMINSD256 + OpAMD64VPMULDQ256 OpAMD64VPMULLD256 + OpAMD64VPHADDD256 OpAMD64VPHSUBD256 OpAMD64VPOPCNTD256 OpAMD64VPSIGND256 OpAMD64VPSUBD256 OpAMD64VPABSQ128 + OpAMD64VPADDQ128 OpAMD64VPCMPEQQ128 OpAMD64VPCMPGTQ128 OpAMD64VPABSQMasked128 + OpAMD64VPADDQMasked128 OpAMD64VPANDQMasked128 OpAMD64VPANDNQMasked128 OpAMD64VPCMPEQQMasked128 @@ -1483,17 +1542,21 @@ const ( OpAMD64VPMINSQMasked128 OpAMD64VPMULDQMasked128 OpAMD64VPMULLQMasked128 + OpAMD64VPORQMasked128 + OpAMD64VPOPCNTQMasked128 OpAMD64VPSUBQMasked128 + OpAMD64VPXORQMasked128 OpAMD64VPMAXSQ128 OpAMD64VPMINSQ128 - OpAMD64VPMULDQ128 OpAMD64VPMULLQ128 - OpAMD64VPOR128 + OpAMD64VPOPCNTQ128 + OpAMD64VPSUBQ128 OpAMD64VPABSQ256 OpAMD64VPADDQ256 OpAMD64VPCMPEQQ256 OpAMD64VPCMPGTQ256 OpAMD64VPABSQMasked256 + OpAMD64VPADDQMasked256 OpAMD64VPANDQMasked256 OpAMD64VPANDNQMasked256 OpAMD64VPCMPEQQMasked256 @@ -1505,19 +1568,21 @@ const ( OpAMD64VPORQMasked256 OpAMD64VPOPCNTQMasked256 OpAMD64VPSUBQMasked256 + OpAMD64VPXORQMasked256 OpAMD64VPMAXSQ256 OpAMD64VPMINSQ256 - OpAMD64VPMULDQ256 OpAMD64VPMULLQ256 - OpAMD64VPOR256 OpAMD64VPOPCNTQ256 OpAMD64VPSUBQ256 OpAMD64VPABSQ512 + OpAMD64VPADDQ512 OpAMD64VPANDQ512 + OpAMD64VPANDNQ512 OpAMD64VPCMPEQQ512 OpAMD64VPCMPGTQ512 OpAMD64VPABSQMasked512 OpAMD64VPADDQMasked512 + OpAMD64VPANDQMasked512 OpAMD64VPANDNQMasked512 OpAMD64VPCMPEQQMasked512 OpAMD64VPCMPGTQMasked512 @@ -1525,48 +1590,78 @@ const ( OpAMD64VPMINSQMasked512 OpAMD64VPMULDQMasked512 OpAMD64VPMULLQMasked512 + OpAMD64VPORQMasked512 + OpAMD64VPOPCNTQMasked512 + OpAMD64VPSUBQMasked512 + OpAMD64VPXORQMasked512 OpAMD64VPMAXSQ512 OpAMD64VPMINSQ512 OpAMD64VPMULDQ512 OpAMD64VPMULLQ512 + OpAMD64VPORQ512 OpAMD64VPOPCNTQ512 OpAMD64VPSUBQ512 OpAMD64VPXORQ512 OpAMD64VPABSB128 OpAMD64VPADDB128 - OpAMD64VPAND128 OpAMD64VPCMPEQB128 OpAMD64VPCMPGTB128 OpAMD64VPABSBMasked128 OpAMD64VPADDBMasked128 + OpAMD64VPCMPEQBMasked128 + OpAMD64VPCMPGTBMasked128 OpAMD64VPMAXSBMasked128 OpAMD64VPMINSBMasked128 + OpAMD64VPOPCNTBMasked128 + OpAMD64VPADDSBMasked128 OpAMD64VPSUBSBMasked128 + OpAMD64VPSUBBMasked128 OpAMD64VPMAXSB128 OpAMD64VPMINSB128 + OpAMD64VPOPCNTB128 + OpAMD64VPADDSB128 + OpAMD64VPSUBSB128 OpAMD64VPSIGNB128 OpAMD64VPSUBB128 OpAMD64VPABSB256 OpAMD64VPADDB256 - OpAMD64VPANDN256 OpAMD64VPCMPEQB256 OpAMD64VPCMPGTB256 OpAMD64VPABSBMasked256 + OpAMD64VPADDBMasked256 + OpAMD64VPCMPEQBMasked256 + OpAMD64VPCMPGTBMasked256 OpAMD64VPMAXSBMasked256 OpAMD64VPMINSBMasked256 + OpAMD64VPOPCNTBMasked256 + OpAMD64VPADDSBMasked256 OpAMD64VPSUBSBMasked256 + OpAMD64VPSUBBMasked256 
OpAMD64VPMAXSB256 OpAMD64VPMINSB256 OpAMD64VPOPCNTB256 + OpAMD64VPADDSB256 + OpAMD64VPSUBSB256 OpAMD64VPSIGNB256 + OpAMD64VPSUBB256 OpAMD64VPABSB512 + OpAMD64VPADDB512 + OpAMD64VPCMPEQB512 + OpAMD64VPCMPGTB512 OpAMD64VPABSBMasked512 + OpAMD64VPADDBMasked512 + OpAMD64VPCMPEQBMasked512 + OpAMD64VPCMPGTBMasked512 OpAMD64VPMAXSBMasked512 OpAMD64VPMINSBMasked512 + OpAMD64VPOPCNTBMasked512 OpAMD64VPADDSBMasked512 + OpAMD64VPSUBSBMasked512 + OpAMD64VPSUBBMasked512 OpAMD64VPMAXSB512 OpAMD64VPMINSB512 OpAMD64VPOPCNTB512 + OpAMD64VPADDSB512 OpAMD64VPSUBSB512 OpAMD64VPSUBB512 OpAMD64VPAVGW256 @@ -1574,152 +1669,73 @@ const ( OpAMD64VPMAXUWMasked256 OpAMD64VPMINUWMasked256 OpAMD64VPMULHUWMasked256 - OpAMD64VPOPCNTWMasked256 OpAMD64VPMAXUW256 OpAMD64VPMINUW256 OpAMD64VPMULHUW256 - OpAMD64VPHADDW256 - OpAMD64VPOPCNTW256 - OpAMD64VPADDSW256 OpAMD64VPAVGW512 - OpAMD64VPADDWMasked512 OpAMD64VPAVGWMasked512 OpAMD64VPMAXUWMasked512 OpAMD64VPMINUWMasked512 OpAMD64VPMULHUWMasked512 - OpAMD64VPOPCNTWMasked512 - OpAMD64VPADDSWMasked512 - OpAMD64VPSUBSWMasked512 - OpAMD64VPSUBWMasked512 OpAMD64VPMAXUW512 OpAMD64VPMINUW512 OpAMD64VPMULHUW512 - OpAMD64VPOPCNTW512 - OpAMD64VPADDSW512 - OpAMD64VPSUBW512 OpAMD64VPAVGW128 - OpAMD64VPADDWMasked128 OpAMD64VPAVGWMasked128 OpAMD64VPMAXUWMasked128 OpAMD64VPMINUWMasked128 OpAMD64VPMULHUWMasked128 - OpAMD64VPADDSWMasked128 - OpAMD64VPSUBWMasked128 OpAMD64VPMAXUW128 OpAMD64VPMINUW128 OpAMD64VPMULHUW128 - OpAMD64VPHADDW128 - OpAMD64VPOPCNTW128 - OpAMD64VPADDSW128 - OpAMD64VPSUBSW128 - OpAMD64VPSUBW128 - OpAMD64VPADDD512 - OpAMD64VPANDND512 - OpAMD64VPADDDMasked512 - OpAMD64VPANDDMasked512 - OpAMD64VPANDNDMasked512 OpAMD64VPMAXUDMasked512 OpAMD64VPMINUDMasked512 - OpAMD64VPORDMasked512 OpAMD64VPMAXUD512 OpAMD64VPMINUD512 - OpAMD64VPOPCNTD512 - OpAMD64VPSUBD512 - OpAMD64VPADDD128 - OpAMD64VPADDDMasked128 - OpAMD64VPANDNDMasked128 OpAMD64VPMAXUDMasked128 OpAMD64VPMINUDMasked128 OpAMD64VPMAXUD128 OpAMD64VPMINUD128 - OpAMD64VPHADDD128 - OpAMD64VPOPCNTD128 - OpAMD64VPADDD256 - OpAMD64VPADDDMasked256 - OpAMD64VPANDDMasked256 - OpAMD64VPANDNDMasked256 + OpAMD64VPMULUDQ128 OpAMD64VPMAXUDMasked256 OpAMD64VPMINUDMasked256 - OpAMD64VPOPCNTDMasked256 - OpAMD64VPXORDMasked256 OpAMD64VPMAXUD256 OpAMD64VPMINUD256 OpAMD64VPMULUDQ256 - OpAMD64VPHADDD256 - OpAMD64VPXOR256 - OpAMD64VPADDQ128 - OpAMD64VPADDQMasked128 OpAMD64VPMAXUQMasked128 OpAMD64VPMINUQMasked128 OpAMD64VPMULUDQMasked128 - OpAMD64VPORQMasked128 - OpAMD64VPOPCNTQMasked128 - OpAMD64VPXORQMasked128 OpAMD64VPMAXUQ128 OpAMD64VPMINUQ128 - OpAMD64VPMULUDQ128 - OpAMD64VPOPCNTQ128 - OpAMD64VPSUBQ128 - OpAMD64VPXOR128 - OpAMD64VPADDQMasked256 OpAMD64VPMAXUQMasked256 OpAMD64VPMINUQMasked256 OpAMD64VPMULUDQMasked256 - OpAMD64VPXORQMasked256 OpAMD64VPMAXUQ256 OpAMD64VPMINUQ256 - OpAMD64VPADDQ512 - OpAMD64VPANDNQ512 - OpAMD64VPANDQMasked512 OpAMD64VPMAXUQMasked512 OpAMD64VPMINUQMasked512 OpAMD64VPMULUDQMasked512 - OpAMD64VPORQMasked512 - OpAMD64VPOPCNTQMasked512 - OpAMD64VPSUBQMasked512 - OpAMD64VPXORQMasked512 OpAMD64VPMAXUQ512 OpAMD64VPMINUQ512 OpAMD64VPMULUDQ512 - OpAMD64VPORQ512 - OpAMD64VPANDN128 OpAMD64VPAVGB128 OpAMD64VPAVGBMasked128 OpAMD64VPMAXUBMasked128 OpAMD64VPMINUBMasked128 - OpAMD64VPOPCNTBMasked128 - OpAMD64VPADDSBMasked128 - OpAMD64VPSUBBMasked128 OpAMD64VPMAXUB128 OpAMD64VPMINUB128 - OpAMD64VPOPCNTB128 - OpAMD64VPADDSB128 - OpAMD64VPSUBSB128 OpAMD64VPAVGB256 - OpAMD64VPADDBMasked256 OpAMD64VPAVGBMasked256 OpAMD64VPMAXUBMasked256 OpAMD64VPMINUBMasked256 - OpAMD64VPOPCNTBMasked256 - OpAMD64VPADDSBMasked256 - 
OpAMD64VPSUBBMasked256 OpAMD64VPMAXUB256 OpAMD64VPMINUB256 - OpAMD64VPADDSB256 - OpAMD64VPSUBSB256 - OpAMD64VPSUBB256 - OpAMD64VPADDB512 OpAMD64VPAVGB512 - OpAMD64VPADDBMasked512 OpAMD64VPAVGBMasked512 OpAMD64VPMAXUBMasked512 OpAMD64VPMINUBMasked512 - OpAMD64VPOPCNTBMasked512 - OpAMD64VPSUBSBMasked512 - OpAMD64VPSUBBMasked512 OpAMD64VPMAXUB512 OpAMD64VPMINUB512 - OpAMD64VPADDSB512 OpAMD64VCMPPS512 OpAMD64VCMPPSMasked512 OpAMD64VCMPPS128 @@ -1734,26 +1750,26 @@ const ( OpAMD64VCMPPDMasked512 OpAMD64VPCMPW256 OpAMD64VPCMPWMasked256 - OpAMD64VPCMPWMasked512 OpAMD64VPCMPW512 + OpAMD64VPCMPWMasked512 OpAMD64VPCMPW128 OpAMD64VPCMPWMasked128 OpAMD64VPCMPD512 OpAMD64VPCMPDMasked512 - OpAMD64VPCMPDMasked128 OpAMD64VPCMPD128 + OpAMD64VPCMPDMasked128 OpAMD64VPCMPD256 OpAMD64VPCMPDMasked256 OpAMD64VPCMPQ128 OpAMD64VPCMPQMasked128 OpAMD64VPCMPQ256 OpAMD64VPCMPQMasked256 - OpAMD64VPCMPQMasked512 OpAMD64VPCMPQ512 - OpAMD64VPCMPBMasked128 + OpAMD64VPCMPQMasked512 OpAMD64VPCMPB128 - OpAMD64VPCMPBMasked256 + OpAMD64VPCMPBMasked128 OpAMD64VPCMPB256 + OpAMD64VPCMPBMasked256 OpAMD64VPCMPB512 OpAMD64VPCMPBMasked512 OpAMD64VPCMPUW256 @@ -1762,16 +1778,16 @@ const ( OpAMD64VPCMPUWMasked512 OpAMD64VPCMPUW128 OpAMD64VPCMPUWMasked128 - OpAMD64VPCMPUDMasked512 OpAMD64VPCMPUD512 + OpAMD64VPCMPUDMasked512 OpAMD64VPCMPUD128 OpAMD64VPCMPUDMasked128 - OpAMD64VPCMPUDMasked256 OpAMD64VPCMPUD256 + OpAMD64VPCMPUDMasked256 OpAMD64VPCMPUQ128 OpAMD64VPCMPUQMasked128 - OpAMD64VPCMPUQMasked256 OpAMD64VPCMPUQ256 + OpAMD64VPCMPUQMasked256 OpAMD64VPCMPUQ512 OpAMD64VPCMPUQMasked512 OpAMD64VPCMPUB128 @@ -17758,6 +17774,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPSMasked512", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPSMasked512", argLen: 3, @@ -17926,21 +17958,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VADDPSMasked512", - argLen: 3, - asm: x86.AVADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VXORPSMasked512", argLen: 3, @@ -18059,6 +18076,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPS128", + argLen: 2, + commutative: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPS128", argLen: 2, @@ -18444,20 +18476,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VADDPS128", - argLen: 2, - asm: x86.AVADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VXORPS128", 
argLen: 2, @@ -18558,6 +18576,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPSMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPS, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPSMasked256", argLen: 3, @@ -18726,21 +18760,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VADDPSMasked256", - argLen: 3, - asm: x86.AVADDPS, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VXORPSMasked256", argLen: 3, @@ -19387,6 +19406,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPDMasked256", argLen: 3, @@ -19555,21 +19590,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VADDPDMasked256", - argLen: 3, - asm: x86.AVADDPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, { name: "VXORPDMasked256", argLen: 3, @@ -19716,6 +19736,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPD512", + argLen: 2, + commutative: true, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPD512", argLen: 2, @@ -19786,6 +19821,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VADDPDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVADDPD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VANDPDMasked512", argLen: 3, @@ -19954,21 +20005,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "VADDPDMasked512", - argLen: 3, - asm: x86.AVADDPD, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 
X10 X11 X12 X13 X14 - }, - }, - }, { name: "VXORPDMasked512", argLen: 3, @@ -20073,9 +20109,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VADDPD512", - argLen: 2, - asm: x86.AVADDPD, + name: "VXORPD512", + argLen: 2, + commutative: true, + asm: x86.AVXORPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20087,10 +20124,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VXORPD512", + name: "VPABSW256", + argLen: 1, + asm: x86.AVPABSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDW256", argLen: 2, commutative: true, - asm: x86.AVXORPD, + asm: x86.AVPADDW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20102,12 +20152,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSW256", - argLen: 1, - asm: x86.AVPABSW, + name: "VPAND256", + argLen: 2, + commutative: true, + asm: x86.AVPAND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20115,10 +20167,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDW256", + name: "VPANDN256", argLen: 2, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVPANDN, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20283,6 +20335,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPOPCNTWMasked256", + argLen: 2, + asm: x86.AVPOPCNTW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPADDSWMasked256", argLen: 3, @@ -20389,6 +20455,35 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPOR256", + argLen: 2, + commutative: true, + asm: x86.AVPOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPHADDW256", + argLen: 2, + asm: x86.AVPHADDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHSUBW256", argLen: 2, @@ -20403,6 +20498,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPOPCNTW256", + argLen: 1, + asm: x86.AVPOPCNTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDSW256", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPHADDSW256", argLen: 2, @@ -20473,6 +20596,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPXOR256", + argLen: 2, + commutative: true, + asm: x86.AVPXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPABSW512", argLen: 1, @@ -20544,6 +20682,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPADDWMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQWMasked512", argLen: 3, @@ -20640,14 +20794,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSW512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSW, + name: "VPOPCNTWMasked512", + argLen: 2, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20655,14 +20808,75 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSW512", - argLen: 2, + name: "VPADDSWMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMINSW, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBSWMasked512", + argLen: 3, + asm: x86.AVPSUBSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBWMasked512", + argLen: 3, + asm: x86.AVPSUBW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXSW512", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMINSW512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20699,6 +20913,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPOPCNTW512", + argLen: 1, + asm: x86.AVPOPCNTW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDSW512", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBSW512", argLen: 2, @@ -20713,6 +20955,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSUBW512", + argLen: 2, + asm: x86.AVPSUBW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPABSW128", argLen: 1, @@ -20741,6 +20997,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPAND128", + argLen: 2, + commutative: true, + asm: x86.AVPAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPANDN128", + argLen: 2, + commutative: true, + asm: x86.AVPANDN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQW128", argLen: 2, @@ -20784,6 +21070,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPADDWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPCMPEQWMasked128", argLen: 3, @@ -20893,6 +21195,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPADDSWMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDSW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 
X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPSUBSWMasked128", argLen: 3, @@ -20908,6 +21226,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "VPSUBWMasked128", + argLen: 3, + asm: x86.AVPSUBW, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, { name: "VPMAXSW128", argLen: 2, @@ -20969,9 +21302,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBW128", - argLen: 2, - asm: x86.AVPHSUBW, + name: "VPOR128", + argLen: 2, + commutative: true, + asm: x86.AVPOR, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20983,9 +21317,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDSW128", + name: "VPHADDW128", argLen: 2, - asm: x86.AVPHADDSW, + asm: x86.AVPHADDW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -20997,9 +21331,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBSW128", + name: "VPHSUBW128", argLen: 2, - asm: x86.AVPHSUBSW, + asm: x86.AVPHSUBW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21011,13 +21345,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNW128", - argLen: 2, - asm: x86.AVPSIGNW, + name: "VPOPCNTW128", + argLen: 1, + asm: x86.AVPOPCNTW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21025,12 +21358,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD512", - argLen: 1, - asm: x86.AVPABSD, + name: "VPADDSW128", + argLen: 2, + commutative: true, + asm: x86.AVPADDSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21038,10 +21373,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDD512", - argLen: 2, - commutative: true, - asm: x86.AVPANDD, + name: "VPHADDSW128", + argLen: 2, + asm: x86.AVPHADDSW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21053,13 +21387,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked512", + name: "VPHSUBSW128", argLen: 2, - asm: x86.AVPABSD, + asm: x86.AVPHSUBSW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21067,15 +21401,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSD, + name: "VPSUBSW128", + argLen: 2, + asm: x86.AVPSUBSW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21083,15 +21415,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMINSD, + name: "VPSIGNW128", + argLen: 2, + asm: x86.AVPSIGNW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21099,15 +21429,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMULLD, + name: "VPSUBW128", + argLen: 2, + asm: x86.AVPSUBW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21115,13 +21443,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked512", - argLen: 2, - asm: x86.AVPOPCNTD, + name: "VPXOR128", + argLen: 2, + commutative: true, + asm: x86.AVPXOR, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21129,14 +21458,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked512", - argLen: 3, - asm: x86.AVPSUBD, + name: "VPABSD512", + argLen: 1, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21144,15 +21471,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked512", - argLen: 3, + name: "VPADDD512", + argLen: 2, commutative: true, - asm: x86.AVPXORD, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21160,10 +21486,10 @@ var opcodeTable = [...]opInfo{ }, }, { - 
name: "VPMAXSD512", + name: "VPANDD512", argLen: 2, commutative: true, - asm: x86.AVPMAXSD, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21175,10 +21501,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD512", + name: "VPANDND512", argLen: 2, commutative: true, - asm: x86.AVPMINSD, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21190,44 +21516,42 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD512", + name: "VPCMPEQD512", argLen: 2, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPORD512", - argLen: 2, - commutative: true, - asm: x86.AVPORD, + name: "VPCMPGTD512", + argLen: 2, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPXORD512", - argLen: 2, - commutative: true, - asm: x86.AVPXORD, + name: "VPABSDMasked512", + argLen: 2, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21235,12 +21559,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD128", - argLen: 1, - asm: x86.AVPABSD, + name: "VPADDDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21248,14 +21575,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQD128", - argLen: 2, + name: "VPANDDMasked512", + argLen: 3, commutative: true, - asm: x86.AVPCMPEQD, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21263,13 +21591,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTD128", - argLen: 2, - asm: x86.AVPCMPGTD, + name: "VPANDNDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21277,24 +21607,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked128", - argLen: 2, - asm: x86.AVPABSD, + name: "VPCMPEQDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPANDDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPANDD, + name: "VPCMPGTDMasked512", + argLen: 3, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21302,12 +21633,12 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMAXSDMasked128", + name: "VPMAXSDMasked512", argLen: 3, commutative: true, asm: x86.AVPMAXSD, @@ -21323,7 +21654,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked128", + name: "VPMINSDMasked512", argLen: 3, commutative: true, asm: x86.AVPMINSD, @@ -21339,7 +21670,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked128", + name: "VPMULLDMasked512", argLen: 3, commutative: true, asm: x86.AVPMULLD, @@ -21355,7 +21686,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORDMasked128", + name: "VPORDMasked512", argLen: 3, commutative: true, asm: x86.AVPORD, @@ -21371,7 +21702,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked128", + name: "VPOPCNTDMasked512", argLen: 2, asm: x86.AVPOPCNTD, reg: regInfo{ @@ -21385,7 +21716,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBDMasked128", + name: "VPSUBDMasked512", argLen: 3, asm: x86.AVPSUBD, reg: regInfo{ @@ -21400,7 +21731,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked128", + name: "VPXORDMasked512", argLen: 3, commutative: true, asm: x86.AVPXORD, @@ -21416,7 +21747,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSD128", + name: "VPMAXSD512", argLen: 2, commutative: true, asm: x86.AVPMAXSD, @@ -21431,7 +21762,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD128", + name: "VPMINSD512", argLen: 2, commutative: true, asm: x86.AVPMINSD, @@ -21446,7 +21777,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD128", + name: "VPMULLD512", argLen: 2, commutative: true, asm: x86.AVPMULLD, @@ -21461,9 +21792,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBD128", - argLen: 2, - asm: x86.AVPHSUBD, + name: "VPORD512", + argLen: 2, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21475,13 +21807,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGND128", - argLen: 2, - asm: x86.AVPSIGND, + name: "VPOPCNTD512", + argLen: 1, + asm: 
x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21489,7 +21820,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBD128", + name: "VPSUBD512", argLen: 2, asm: x86.AVPSUBD, reg: regInfo{ @@ -21503,7 +21834,22 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSD256", + name: "VPXORD512", + argLen: 2, + commutative: true, + asm: x86.AVPXORD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPABSD128", argLen: 1, asm: x86.AVPABSD, reg: regInfo{ @@ -21516,10 +21862,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAND256", + name: "VPADDD128", argLen: 2, commutative: true, - asm: x86.AVPAND, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21531,7 +21877,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQD256", + name: "VPCMPEQD128", argLen: 2, commutative: true, asm: x86.AVPCMPEQD, @@ -21546,7 +21892,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTD256", + name: "VPCMPGTD128", argLen: 2, asm: x86.AVPCMPGTD, reg: regInfo{ @@ -21560,7 +21906,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSDMasked256", + name: "VPABSDMasked128", argLen: 2, asm: x86.AVPABSD, reg: regInfo{ @@ -21574,10 +21920,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSDMasked256", + name: "VPADDDMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXSD, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21590,10 +21936,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSDMasked256", + name: "VPANDDMasked128", argLen: 3, commutative: true, - asm: x86.AVPMINSD, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21606,10 +21952,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLDMasked256", + name: "VPANDNDMasked128", argLen: 3, commutative: true, - asm: x86.AVPMULLD, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21622,10 +21968,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORDMasked256", + name: "VPCMPEQDMasked128", argLen: 3, commutative: true, - asm: x86.AVPORD, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21633,14 +21979,14 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSUBDMasked256", + name: "VPCMPGTDMasked128", argLen: 3, - asm: x86.AVPSUBD, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21648,19 +21994,20 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - 
name: "VPMAXSD256", - argLen: 2, + name: "VPMAXSDMasked128", + argLen: 3, commutative: true, asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21668,14 +22015,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSD256", - argLen: 2, + name: "VPMINSDMasked128", + argLen: 3, commutative: true, asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21683,14 +22031,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLD256", - argLen: 2, + name: "VPMULLDMasked128", + argLen: 3, commutative: true, asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21698,13 +22047,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHSUBD256", - argLen: 2, - asm: x86.AVPHSUBD, + name: "VPORDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21712,12 +22063,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTD256", - argLen: 1, + name: "VPOPCNTDMasked128", + argLen: 2, asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21725,94 +22077,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGND256", - argLen: 2, - asm: x86.AVPSIGND, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSUBD256", - argLen: 2, + name: "VPSUBDMasked128", + argLen: 3, asm: x86.AVPSUBD, - reg: regInfo{ - 
inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPABSQ128", - argLen: 1, - asm: x86.AVPABSQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPEQQ128", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPGTQ128", - argLen: 2, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPABSQMasked128", - argLen: 2, - asm: x86.AVPABSQ, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPANDQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21825,10 +22092,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked128", + name: "VPXORDMasked128", argLen: 3, commutative: true, - asm: x86.AVPANDNQ, + asm: x86.AVPXORD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -21841,46 +22108,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPCMPEQQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPCMPGTQMasked128", - argLen: 3, - asm: x86.AVPCMPGTQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, - { - name: "VPMAXSQMasked128", - argLen: 3, + name: "VPMAXSD128", + argLen: 2, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21888,15 +22123,14 @@ var 
opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQMasked128", - argLen: 3, + name: "VPMINSD128", + argLen: 2, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21904,15 +22138,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQMasked128", - argLen: 3, + name: "VPMULDQ128", + argLen: 2, commutative: true, asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21920,30 +22153,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked128", - argLen: 3, + name: "VPMULLD128", + argLen: 2, commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSUBQMasked128", - argLen: 3, - asm: x86.AVPSUBQ, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21951,10 +22168,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSQ, + name: "VPHADDD128", + argLen: 2, + asm: x86.AVPHADDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21966,10 +22182,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, + name: "VPHSUBD128", + argLen: 2, + asm: x86.AVPHSUBD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21981,14 +22196,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMULDQ, + name: "VPOPCNTD128", + argLen: 1, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -21996,10 +22209,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ128", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, + name: "VPSIGND128", + argLen: 2, + asm: 
x86.AVPSIGND, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22011,10 +22223,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOR128", - argLen: 2, - commutative: true, - asm: x86.AVPOR, + name: "VPSUBD128", + argLen: 2, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22026,9 +22237,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ256", + name: "VPABSD256", argLen: 1, - asm: x86.AVPABSQ, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22039,10 +22250,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ256", + name: "VPADDD256", argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22054,10 +22265,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQ256", + name: "VPCMPEQD256", argLen: 2, commutative: true, - asm: x86.AVPCMPEQQ, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22069,9 +22280,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTQ256", + name: "VPCMPGTD256", argLen: 2, - asm: x86.AVPCMPGTQ, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22083,9 +22294,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked256", + name: "VPABSDMasked256", argLen: 2, - asm: x86.AVPABSQ, + asm: x86.AVPABSD, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22097,10 +22308,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQMasked256", + name: "VPADDDMasked256", argLen: 3, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPADDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22113,10 +22324,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked256", + name: "VPANDDMasked256", argLen: 3, commutative: true, - asm: x86.AVPANDNQ, + asm: x86.AVPANDD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22129,10 +22340,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQMasked256", + name: "VPANDNDMasked256", argLen: 3, commutative: true, - asm: x86.AVPCMPEQQ, + asm: x86.AVPANDND, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22140,14 +22351,15 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPGTQMasked256", - argLen: 3, - asm: x86.AVPCMPGTQ, + name: "VPCMPEQDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22160,10 +22372,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSQ, + name: "VPCMPGTDMasked256", + argLen: 3, + asm: x86.AVPCMPGTD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22171,15 +22382,15 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 
X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMINSQMasked256", + name: "VPMAXSDMasked256", argLen: 3, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22192,10 +22403,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULDQMasked256", + name: "VPMINSDMasked256", argLen: 3, commutative: true, - asm: x86.AVPMULDQ, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22208,10 +22419,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked256", + name: "VPMULLDMasked256", argLen: 3, commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22224,10 +22435,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked256", + name: "VPORDMasked256", argLen: 3, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPORD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22240,9 +22451,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked256", + name: "VPOPCNTDMasked256", argLen: 2, - asm: x86.AVPOPCNTQ, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22254,9 +22465,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked256", + name: "VPSUBDMasked256", argLen: 3, - asm: x86.AVPSUBQ, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22269,10 +22480,26 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ256", + name: "VPXORDMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPXORD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXSD256", argLen: 2, commutative: true, - asm: x86.AVPMAXSQ, + asm: x86.AVPMAXSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22284,10 +22511,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQ256", + name: "VPMINSD256", argLen: 2, commutative: true, - asm: x86.AVPMINSQ, + asm: x86.AVPMINSD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22314,10 +22541,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQ256", + name: "VPMULLD256", argLen: 2, commutative: true, - asm: x86.AVPMULLQ, + asm: x86.AVPMULLD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22329,10 +22556,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOR256", - argLen: 2, - commutative: true, - asm: x86.AVPOR, + name: "VPHADDD256", + argLen: 2, + asm: x86.AVPHADDD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22344,9 +22570,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQ256", + name: "VPHSUBD256", + argLen: 2, + asm: x86.AVPHSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: 
[]outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPOPCNTD256", argLen: 1, - asm: x86.AVPOPCNTQ, + asm: x86.AVPOPCNTD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22357,9 +22597,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQ256", + name: "VPSIGND256", + argLen: 2, + asm: x86.AVPSIGND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBD256", argLen: 2, - asm: x86.AVPSUBQ, + asm: x86.AVPSUBD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22371,7 +22625,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQ512", + name: "VPABSQ128", argLen: 1, asm: x86.AVPABSQ, reg: regInfo{ @@ -22384,10 +22638,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQ512", + name: "VPADDQ128", argLen: 2, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22399,7 +22653,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQ512", + name: "VPCMPEQQ128", argLen: 2, commutative: true, asm: x86.AVPCMPEQQ, @@ -22409,12 +22663,12 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPCMPGTQ512", + name: "VPCMPGTQ128", argLen: 2, asm: x86.AVPCMPGTQ, reg: regInfo{ @@ -22428,7 +22682,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSQMasked512", + name: "VPABSQMasked128", argLen: 2, asm: x86.AVPABSQ, reg: regInfo{ @@ -22442,7 +22696,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQMasked512", + name: "VPADDQMasked128", argLen: 3, commutative: true, asm: x86.AVPADDQ, @@ -22458,7 +22712,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQMasked512", + name: "VPANDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPANDNQMasked128", argLen: 3, commutative: true, asm: x86.AVPANDNQ, @@ -22474,7 +22744,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQQMasked512", + name: "VPCMPEQQMasked128", argLen: 3, commutative: true, asm: x86.AVPCMPEQQ, @@ -22490,7 +22760,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTQMasked512", + name: "VPCMPGTQMasked128", argLen: 3, asm: x86.AVPCMPGTQ, reg: regInfo{ @@ -22505,7 +22775,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQMasked512", + name: "VPMAXSQMasked128", argLen: 3, commutative: true, asm: x86.AVPMAXSQ, @@ -22521,7 +22791,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSQMasked512", + name: "VPMINSQMasked128", argLen: 3, commutative: true, asm: x86.AVPMINSQ, @@ -22537,7 +22807,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: 
"VPMULDQMasked512", + name: "VPMULDQMasked128", argLen: 3, commutative: true, asm: x86.AVPMULDQ, @@ -22553,7 +22823,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULLQMasked512", + name: "VPMULLQMasked128", argLen: 3, commutative: true, asm: x86.AVPMULLQ, @@ -22569,173 +22839,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXSQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMINSQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMINSQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULDQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPMULLQ512", - argLen: 2, - commutative: true, - asm: x86.AVPMULLQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPOPCNTQ512", - argLen: 1, - asm: x86.AVPOPCNTQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSUBQ512", - argLen: 2, - asm: x86.AVPSUBQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPXORQ512", - argLen: 2, - commutative: true, - asm: x86.AVPXORQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPABSB128", - argLen: 1, - asm: x86.AVPABSB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPADDB128", - argLen: 2, - commutative: true, - asm: x86.AVPADDB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: 
"VPAND128", - argLen: 2, - commutative: true, - asm: x86.AVPAND, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPEQB128", - argLen: 2, + name: "VPORQMasked128", + argLen: 3, commutative: true, - asm: x86.AVPCMPEQB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPCMPGTB128", - argLen: 2, - asm: x86.AVPCMPGTB, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22743,9 +22855,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked128", + name: "VPOPCNTQMasked128", argLen: 2, - asm: x86.AVPABSB, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22757,10 +22869,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDB, + name: "VPSUBQMasked128", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22773,10 +22884,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked128", + name: "VPXORQMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXSB, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22789,30 +22900,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked128", - argLen: 3, + name: "VPMAXSQ128", + argLen: 2, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSUBSBMasked128", - argLen: 3, - asm: x86.AVPSUBSB, - reg: regInfo{ - inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22820,10 +22915,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB128", + name: "VPMINSQ128", argLen: 2, commutative: true, - asm: x86.AVPMAXSB, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22835,10 +22930,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB128", + name: "VPMULLQ128", argLen: 2, 
commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22850,13 +22945,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNB128", - argLen: 2, - asm: x86.AVPSIGNB, + name: "VPOPCNTQ128", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22864,9 +22958,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB128", + name: "VPSUBQ128", argLen: 2, - asm: x86.AVPSUBB, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22878,9 +22972,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB256", + name: "VPABSQ256", argLen: 1, - asm: x86.AVPABSB, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22891,10 +22985,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDB256", + name: "VPADDQ256", argLen: 2, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22906,10 +23000,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN256", + name: "VPCMPEQQ256", argLen: 2, commutative: true, - asm: x86.AVPANDN, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22921,10 +23015,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPEQB256", - argLen: 2, - commutative: true, - asm: x86.AVPCMPEQB, + name: "VPCMPGTQ256", + argLen: 2, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22936,13 +23029,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPGTB256", + name: "VPABSQMasked256", argLen: 2, - asm: x86.AVPCMPGTB, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22950,13 +23043,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked256", - argLen: 2, - asm: x86.AVPABSB, + name: "VPADDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -22964,10 +23059,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked256", + name: "VPANDQMasked256", argLen: 3, commutative: true, - asm: x86.AVPMAXSB, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22980,10 +23075,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked256", + name: "VPANDNQMasked256", argLen: 3, 
commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -22996,9 +23091,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked256", + name: "VPCMPEQQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPCMPEQQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPGTQMasked256", argLen: 3, - asm: x86.AVPSUBSB, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23006,19 +23117,20 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMAXSB256", - argLen: 2, + name: "VPMAXSQMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMAXSB, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23026,14 +23138,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB256", - argLen: 2, + name: "VPMINSQMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23041,12 +23154,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB256", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPMULDQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23054,13 +23170,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSIGNB256", - argLen: 2, - asm: x86.AVPSIGNB, + name: "VPMULLQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ 
{0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23068,12 +23186,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSB512", - argLen: 1, - asm: x86.AVPABSB, + name: "VPORQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23081,9 +23202,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPABSBMasked512", + name: "VPOPCNTQMasked256", argLen: 2, - asm: x86.AVPABSB, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23095,10 +23216,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSBMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXSB, + name: "VPSUBQMasked256", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23111,10 +23231,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSBMasked512", + name: "VPXORQMasked256", argLen: 3, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23127,15 +23247,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked512", - argLen: 3, + name: "VPMAXSQ256", + argLen: 2, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23143,10 +23262,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXSB512", + name: "VPMINSQ256", argLen: 2, commutative: true, - asm: x86.AVPMAXSB, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23158,10 +23277,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINSB512", + name: "VPMULLQ256", argLen: 2, commutative: true, - asm: x86.AVPMINSB, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23173,9 +23292,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB512", + name: "VPOPCNTQ256", argLen: 1, - asm: x86.AVPOPCNTB, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23186,42 +23305,114 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB512", + name: "VPSUBQ256", argLen: 2, - asm: x86.AVPSUBSB, + asm: x86.AVPSUBQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPABSQ512", + argLen: 1, + asm: x86.AVPABSQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, 
// X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPADDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPANDQ512", + argLen: 2, + commutative: true, + asm: x86.AVPANDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPANDNQ512", + argLen: 2, + commutative: true, + asm: x86.AVPANDNQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPCMPEQQ512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPSUBB512", + name: "VPCMPGTQ512", argLen: 2, - asm: x86.AVPSUBB, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPAVGW256", - argLen: 2, - commutative: true, - asm: x86.AVPAVGW, + name: "VPABSQMasked512", + argLen: 2, + asm: x86.AVPABSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23229,10 +23420,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGWMasked256", + name: "VPADDQMasked512", argLen: 3, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPADDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23245,10 +23436,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked256", + name: "VPANDQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPANDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23261,10 +23452,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked256", + name: "VPANDNQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPANDNQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23277,10 +23468,10 @@ var opcodeTable = [...]opInfo{ }, }, { - 
name: "VPMULHUWMasked256", + name: "VPCMPEQQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPCMPEQQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23288,33 +23479,35 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPOPCNTWMasked256", - argLen: 2, - asm: x86.AVPOPCNTW, + name: "VPCMPGTQMasked512", + argLen: 3, + asm: x86.AVPCMPGTQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMAXUW256", - argLen: 2, + name: "VPMAXSQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23322,14 +23515,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW256", - argLen: 2, + name: "VPMINSQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23337,14 +23531,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW256", - argLen: 2, + name: "VPMULDQMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23352,13 +23547,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDW256", - argLen: 2, - asm: x86.AVPHADDW, + name: "VPMULLQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
}, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23366,12 +23563,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTW256", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPORQMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23379,14 +23579,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSW256", - argLen: 2, - commutative: true, - asm: x86.AVPADDSW, + name: "VPOPCNTQMasked512", + argLen: 2, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23394,14 +23593,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGW512", - argLen: 2, - commutative: true, - asm: x86.AVPAVGW, + name: "VPSUBQMasked512", + argLen: 3, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23409,10 +23608,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDWMasked512", + name: "VPXORQMasked512", argLen: 3, commutative: true, - asm: x86.AVPADDW, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23425,15 +23624,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGWMasked512", - argLen: 3, + name: "VPMAXSQ512", + argLen: 2, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPMAXSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23441,15 +23639,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked512", - argLen: 3, + name: "VPMINSQ512", + argLen: 2, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPMINSQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 
@@ -23457,15 +23654,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked512", - argLen: 3, + name: "VPMULDQ512", + argLen: 2, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMULDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23473,15 +23669,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked512", - argLen: 3, + name: "VPMULLQ512", + argLen: 2, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPMULLQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23489,13 +23684,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTWMasked512", - argLen: 2, - asm: x86.AVPOPCNTW, + name: "VPORQ512", + argLen: 2, + commutative: true, + asm: x86.AVPORQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23503,15 +23699,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSWMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDSW, + name: "VPOPCNTQ512", + argLen: 1, + asm: x86.AVPOPCNTQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23519,14 +23712,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSWMasked512", - argLen: 3, - asm: x86.AVPSUBSW, + name: "VPSUBQ512", + argLen: 2, + asm: x86.AVPSUBQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23534,14 +23726,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBWMasked512", - argLen: 3, - asm: x86.AVPSUBW, + name: "VPXORQ512", + argLen: 2, + commutative: true, + asm: x86.AVPXORQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 
X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23549,14 +23741,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUW, + name: "VPABSB128", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23564,10 +23754,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW512", + name: "VPADDB128", argLen: 2, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23579,10 +23769,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW512", + name: "VPCMPEQB128", argLen: 2, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23594,12 +23784,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTW512", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPCMPGTB128", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23607,14 +23798,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSW512", - argLen: 2, - commutative: true, - asm: x86.AVPADDSW, + name: "VPABSBMasked128", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23622,13 +23812,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBW512", - argLen: 2, - asm: x86.AVPSUBW, + name: "VPADDBMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23636,25 +23828,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGW128", - argLen: 2, + name: "VPCMPEQBMasked128", + argLen: 3, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 
2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPADDWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDW, + name: "VPCMPGTBMasked128", + argLen: 3, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23662,15 +23854,15 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPAVGWMasked128", + name: "VPMAXSBMasked128", argLen: 3, commutative: true, - asm: x86.AVPAVGW, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23683,10 +23875,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUWMasked128", + name: "VPMINSBMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23699,15 +23891,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINUW, + name: "VPOPCNTBMasked128", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23715,10 +23905,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUWMasked128", + name: "VPADDSBMasked128", argLen: 3, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23731,10 +23921,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSWMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDSW, + name: "VPSUBSBMasked128", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23747,9 +23936,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBWMasked128", + name: "VPSUBBMasked128", argLen: 3, - asm: x86.AVPSUBW, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23762,10 +23951,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUW128", + name: "VPMAXSB128", argLen: 2, commutative: true, - asm: x86.AVPMAXUW, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23777,10 +23966,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUW128", + name: "VPMINSB128", argLen: 2, commutative: true, - asm: x86.AVPMINUW, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23792,10 +23981,23 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULHUW128", + name: "VPOPCNTB128", + argLen: 1, + asm: x86.AVPOPCNTB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: 
"VPADDSB128", argLen: 2, commutative: true, - asm: x86.AVPMULHUW, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23807,9 +24009,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDW128", + name: "VPSUBSB128", argLen: 2, - asm: x86.AVPHADDW, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23821,12 +24023,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTW128", - argLen: 1, - asm: x86.AVPOPCNTW, + name: "VPSIGNB128", + argLen: 2, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23834,10 +24037,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSW128", - argLen: 2, - commutative: true, - asm: x86.AVPADDSW, + name: "VPSUBB128", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23849,13 +24051,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSW128", - argLen: 2, - asm: x86.AVPSUBSW, + name: "VPABSB256", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23863,9 +24064,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBW128", - argLen: 2, - asm: x86.AVPSUBW, + name: "VPADDB256", + argLen: 2, + commutative: true, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23877,10 +24079,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDD512", + name: "VPCMPEQB256", argLen: 2, commutative: true, - asm: x86.AVPADDD, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23892,10 +24094,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDND512", - argLen: 2, - commutative: true, - asm: x86.AVPANDND, + name: "VPCMPGTB256", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23907,15 +24108,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPADDD, + name: "VPABSBMasked256", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -23923,10 +24122,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDDMasked512", + name: "VPADDBMasked256", argLen: 3, commutative: true, - asm: x86.AVPANDD, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23939,10 +24138,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked512", + name: "VPCMPEQBMasked256", argLen: 3, commutative: true, - asm: 
x86.AVPANDND, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23950,15 +24149,14 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMAXUDMasked512", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPCMPGTBMasked256", + argLen: 3, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23966,15 +24164,15 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPMINUDMasked512", + name: "VPMAXSBMasked256", argLen: 3, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -23987,10 +24185,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORDMasked512", + name: "VPMINSBMasked256", argLen: 3, commutative: true, - asm: x86.AVPORD, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24003,14 +24201,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD512", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPOPCNTBMasked256", + argLen: 2, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24018,14 +24215,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD512", - argLen: 2, + name: "VPADDSBMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24033,22 +24231,40 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTD512", - argLen: 1, - asm: x86.AVPOPCNTD, + name: "VPSUBSBMasked256", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VPSUBBMasked256", + argLen: 3, + asm: x86.AVPSUBB, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 
X13 X14 }, }, }, { - name: "VPSUBD512", - argLen: 2, - asm: x86.AVPSUBD, + name: "VPMAXSB256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24060,10 +24276,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDD128", + name: "VPMINSB256", argLen: 2, commutative: true, - asm: x86.AVPADDD, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24075,15 +24291,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDD, + name: "VPOPCNTB256", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24091,15 +24304,28 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNDMasked128", - argLen: 3, + name: "VPADDSB256", + argLen: 2, commutative: true, - asm: x86.AVPANDND, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPSUBSB256", + argLen: 2, + asm: x86.AVPSUBSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24107,15 +24333,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSIGNB256", + argLen: 2, + asm: x86.AVPSIGNB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24123,15 +24347,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSUBB256", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24139,14 +24361,12 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPMAXUD128", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPABSB512", + argLen: 1, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24154,10 +24374,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD128", + name: "VPADDB512", argLen: 2, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24169,41 +24389,42 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDD128", - argLen: 2, - asm: x86.AVPHADDD, + name: "VPCMPEQB512", + argLen: 2, + commutative: true, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPOPCNTD128", - argLen: 1, - asm: x86.AVPOPCNTD, + name: "VPCMPGTB512", + argLen: 2, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPADDD256", - argLen: 2, - commutative: true, - asm: x86.AVPADDD, + name: "VPABSBMasked512", + argLen: 2, + asm: x86.AVPABSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24211,10 +24432,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDDMasked256", + name: "VPADDBMasked512", argLen: 3, commutative: true, - asm: x86.AVPADDD, + asm: x86.AVPADDB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24227,10 +24448,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDDMasked256", + name: "VPCMPEQBMasked512", argLen: 3, commutative: true, - asm: x86.AVPANDD, + asm: x86.AVPCMPEQB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24238,15 +24459,14 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: "VPANDNDMasked256", - argLen: 3, - commutative: true, - asm: x86.AVPANDND, + name: "VPCMPGTBMasked512", + argLen: 3, + asm: x86.AVPCMPGTB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24254,15 +24474,15 @@ var opcodeTable = [...]opInfo{ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 }, }, }, { - name: 
"VPMAXUDMasked256", + name: "VPMAXSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUD, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24275,10 +24495,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUDMasked256", + name: "VPMINSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINUD, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24291,9 +24511,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTDMasked256", + name: "VPOPCNTBMasked512", argLen: 2, - asm: x86.AVPOPCNTD, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24305,10 +24525,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORDMasked256", + name: "VPADDSBMasked512", argLen: 3, commutative: true, - asm: x86.AVPXORD, + asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24321,14 +24541,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUD256", - argLen: 2, - commutative: true, - asm: x86.AVPMAXUD, + name: "VPSUBSBMasked512", + argLen: 3, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24336,14 +24556,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUD256", - argLen: 2, - commutative: true, - asm: x86.AVPMINUD, + name: "VPSUBBMasked512", + argLen: 3, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24351,10 +24571,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ256", + name: "VPMAXSB512", argLen: 2, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMAXSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24366,9 +24586,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPHADDD256", - argLen: 2, - asm: x86.AVPHADDD, + name: "VPMINSB512", + argLen: 2, + commutative: true, + asm: x86.AVPMINSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24380,14 +24601,12 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXOR256", - argLen: 2, - commutative: true, - asm: x86.AVPXOR, + name: "VPOPCNTB512", + argLen: 1, + asm: x86.AVPOPCNTB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24395,10 +24614,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ128", + name: "VPADDSB512", argLen: 2, commutative: true, - asm: x86.AVPADDQ, + 
asm: x86.AVPADDSB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24410,15 +24629,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPADDQ, + name: "VPSUBSB512", + argLen: 2, + asm: x86.AVPSUBSB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24426,15 +24643,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked128", - argLen: 3, - commutative: true, - asm: x86.AVPMAXUQ, + name: "VPSUBB512", + argLen: 2, + asm: x86.AVPSUBB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24442,15 +24657,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked128", - argLen: 3, + name: "VPAVGW256", + argLen: 2, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24458,10 +24672,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQMasked128", + name: "VPAVGWMasked256", argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24474,10 +24688,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked128", + name: "VPMAXUWMasked256", argLen: 3, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24490,24 +24704,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked128", - argLen: 2, - asm: x86.AVPOPCNTQ, - reg: regInfo{ - inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPXORQMasked128", + name: "VPMINUWMasked256", argLen: 3, commutative: true, - asm: x86.AVPXORQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24520,14 +24720,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ128", - argLen: 2, + name: "VPMULHUWMasked256", + argLen: 3, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24535,10 +24736,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ128", + name: "VPMAXUW256", argLen: 2, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24550,37 +24751,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ128", + name: "VPMINUW256", argLen: 2, commutative: true, - asm: x86.AVPMULUDQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPOPCNTQ128", - argLen: 1, - asm: x86.AVPOPCNTQ, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - }, - }, - { - name: "VPSUBQ128", - argLen: 2, - asm: x86.AVPSUBQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24592,10 +24766,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXOR128", + name: "VPMULHUW256", argLen: 2, commutative: true, - asm: x86.AVPXOR, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24607,15 +24781,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQMasked256", - argLen: 3, + name: "VPAVGW512", + argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24623,10 +24796,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked256", + name: "VPAVGWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24639,10 +24812,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked256", + name: "VPMAXUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24655,10 +24828,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQMasked256", + name: "VPMINUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24671,10 +24844,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked256", + name: "VPMULHUWMasked512", argLen: 3, commutative: true, - asm: x86.AVPXORQ, + asm: 
x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24687,10 +24860,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ256", + name: "VPMAXUW512", argLen: 2, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24702,10 +24875,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ256", + name: "VPMINUW512", argLen: 2, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24717,10 +24890,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDQ512", + name: "VPMULHUW512", argLen: 2, commutative: true, - asm: x86.AVPADDQ, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24732,10 +24905,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDNQ512", + name: "VPAVGW128", argLen: 2, commutative: true, - asm: x86.AVPANDNQ, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24747,10 +24920,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDQMasked512", + name: "VPAVGWMasked128", argLen: 3, commutative: true, - asm: x86.AVPANDQ, + asm: x86.AVPAVGW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24763,10 +24936,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQMasked512", + name: "VPMAXUWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24779,10 +24952,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQMasked512", + name: "VPMINUWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24795,10 +24968,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQMasked512", + name: "VPMULHUWMasked128", argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMULHUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24811,15 +24984,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQMasked512", - argLen: 3, + name: "VPMAXUW128", + argLen: 2, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPMAXUW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24827,13 +24999,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTQMasked512", - argLen: 2, - asm: x86.AVPOPCNTQ, + name: "VPMINUW128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUW, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 
X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24841,9 +25014,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBQMasked512", - argLen: 3, - asm: x86.AVPSUBQ, + name: "VPMULHUW128", + argLen: 2, + commutative: true, + asm: x86.AVPMULHUW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXUDMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24856,10 +25045,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPXORQMasked512", + name: "VPMINUDMasked512", argLen: 3, commutative: true, - asm: x86.AVPXORQ, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24872,10 +25061,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUQ512", + name: "VPMAXUD512", argLen: 2, commutative: true, - asm: x86.AVPMAXUQ, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24887,10 +25076,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUQ512", + name: "VPMINUD512", argLen: 2, commutative: true, - asm: x86.AVPMINUQ, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24902,14 +25091,31 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMULUDQ512", - argLen: 2, + name: "VPMAXUDMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMULUDQ, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMINUDMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMINUD, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24917,10 +25123,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPORQ512", + name: "VPMAXUD128", argLen: 2, commutative: true, - asm: x86.AVPORQ, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24932,10 +25138,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPANDN128", + name: "VPMINUD128", argLen: 2, commutative: true, - asm: x86.AVPANDN, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24947,10 +25153,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB128", + name: "VPMULUDQ128", argLen: 2, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -24962,10 +25168,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGBMasked128", + name: "VPMAXUDMasked256", argLen: 3, 
commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24978,10 +25184,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked128", + name: "VPMINUDMasked256", argLen: 3, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -24994,15 +25200,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked128", - argLen: 3, + name: "VPMAXUD256", + argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMAXUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25010,13 +25215,45 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTBMasked128", - argLen: 2, - asm: x86.AVPOPCNTB, + name: "VPMINUD256", + argLen: 2, + commutative: true, + asm: x86.AVPMINUD, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMULUDQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMULUDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + }, + }, + { + name: "VPMAXUQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25024,10 +25261,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked128", + name: "VPMINUQMasked128", argLen: 3, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25040,9 +25277,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked128", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPMULUDQMasked128", + argLen: 3, + commutative: true, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25055,10 +25293,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB128", + name: "VPMAXUQ128", argLen: 2, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25070,10 +25308,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB128", + name: "VPMINUQ128", argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ 
-25085,27 +25323,47 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTB128", - argLen: 1, - asm: x86.AVPOPCNTB, + name: "VPMAXUQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, + }, + }, + { + name: "VPMINUQMasked256", + argLen: 3, + commutative: true, + asm: x86.AVPMINUQ, + reg: regInfo{ + inputs: []inputInfo{ + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, }, }, { - name: "VPADDSB128", - argLen: 2, + name: "VPMULUDQMasked256", + argLen: 3, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25113,9 +25371,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB128", - argLen: 2, - asm: x86.AVPSUBSB, + name: "VPMAXUQ256", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25127,10 +25386,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB256", + name: "VPMINUQ256", argLen: 2, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25142,10 +25401,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked256", + name: "VPMAXUQMasked512", argLen: 3, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25158,10 +25417,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGBMasked256", + name: "VPMINUQMasked512", argLen: 3, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25174,10 +25433,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked256", + name: "VPMULUDQMasked512", argLen: 3, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25190,15 +25449,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked256", - argLen: 3, + name: "VPMAXUQ512", + argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMAXUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: 
[]outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25206,13 +25464,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTBMasked256", - argLen: 2, - asm: x86.AVPOPCNTB, + name: "VPMINUQ512", + argLen: 2, + commutative: true, + asm: x86.AVPMINUQ, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25220,15 +25479,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSBMasked256", - argLen: 3, + name: "VPMULUDQ512", + argLen: 2, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMULUDQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25236,14 +25494,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked256", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPAVGB128", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25251,14 +25509,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB256", - argLen: 2, + name: "VPAVGBMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25266,14 +25525,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB256", - argLen: 2, + name: "VPMAXUBMasked128", + argLen: 3, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25281,14 +25541,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB256", - argLen: 2, + name: "VPMINUBMasked128", + argLen: 3, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMINUB, 
reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25296,9 +25557,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSB256", - argLen: 2, - asm: x86.AVPSUBSB, + name: "VPMAXUB128", + argLen: 2, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25310,9 +25572,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBB256", - argLen: 2, - asm: x86.AVPSUBB, + name: "VPMINUB128", + argLen: 2, + commutative: true, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25324,10 +25587,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDB512", + name: "VPAVGB256", argLen: 2, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25339,14 +25602,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGB512", - argLen: 2, + name: "VPAVGBMasked256", + argLen: 3, commutative: true, asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25354,10 +25618,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDBMasked512", + name: "VPMAXUBMasked256", argLen: 3, commutative: true, - asm: x86.AVPADDB, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25370,10 +25634,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPAVGBMasked512", + name: "VPMINUBMasked256", argLen: 3, commutative: true, - asm: x86.AVPAVGB, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25386,15 +25650,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUBMasked512", - argLen: 3, + name: "VPMAXUB256", + argLen: 2, commutative: true, asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25402,15 +25665,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUBMasked512", - argLen: 3, + name: "VPMINUB256", + argLen: 2, commutative: true, asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25418,13 +25680,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPOPCNTBMasked512", - argLen: 2, - asm: x86.AVPOPCNTB, + name: "VPAVGB512", + argLen: 2, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ - {1, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25432,9 +25695,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBSBMasked512", - argLen: 3, - asm: x86.AVPSUBSB, + name: "VPAVGBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPAVGB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25447,9 +25711,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPSUBBMasked512", - argLen: 3, - asm: x86.AVPSUBB, + name: "VPMAXUBMasked512", + argLen: 3, + commutative: true, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25462,14 +25727,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMAXUB512", - argLen: 2, + name: "VPMINUBMasked512", + argLen: 3, commutative: true, - asm: x86.AVPMAXUB, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25477,10 +25743,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPMINUB512", + name: "VPMAXUB512", argLen: 2, commutative: true, - asm: x86.AVPMINUB, + asm: x86.AVPMAXUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25492,10 +25758,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPADDSB512", + name: "VPMINUB512", argLen: 2, commutative: true, - asm: x86.AVPADDSB, + asm: x86.AVPMINUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25507,10 +25773,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVCMPPS, + name: "VCMPPS512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25522,10 +25789,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVCMPPS, + name: "VCMPPSMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25538,10 +25806,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVCMPPS, + name: "VCMPPS128", + auxType: auxInt8, + argLen: 2, + commutative: true, + 
asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25553,10 +25822,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVCMPPS, + name: "VCMPPSMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25569,10 +25839,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPS256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVCMPPS, + name: "VCMPPS256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25584,10 +25855,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPSMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVCMPPS, + name: "VCMPPSMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVCMPPS, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25600,10 +25872,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVCMPPD, + name: "VCMPPD128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25632,10 +25905,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVCMPPD, + name: "VCMPPD256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25664,10 +25938,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VCMPPD512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVCMPPD, + name: "VCMPPD512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVCMPPD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25727,15 +26002,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPWMasked512", + name: "VPCMPW512", auxType: auxInt8, - argLen: 3, + argLen: 2, asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25743,15 +26017,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPW512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPW, + name: "VPCMPWMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25774,11 +26048,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPWMasked128", - auxType: auxInt8, - argLen: 3, - 
commutative: true, - asm: x86.AVPCMPW, + name: "VPCMPWMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25806,11 +26079,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + name: "VPCMPDMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25823,15 +26095,14 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked128", + name: "VPCMPD128", auxType: auxInt8, - argLen: 3, + argLen: 2, asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25839,15 +26110,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPD128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPD, + name: "VPCMPDMasked128", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25870,11 +26141,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPDMasked256", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPD, + name: "VPCMPDMasked256", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25949,11 +26219,25 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQMasked512", - auxType: auxInt8, - argLen: 3, - commutative: true, - asm: x86.AVPCMPQ, + name: "VPCMPQ512", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + }, + outputs: []outputInfo{ + {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + }, + }, + }, + { + name: "VPCMPQMasked512", + auxType: auxInt8, + argLen: 3, + asm: x86.AVPCMPQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -25966,11 +26250,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPQ512", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPQ, + name: "VPCMPB128", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -25998,11 +26281,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPB128", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPB, + name: "VPCMPB256", + auxType: auxInt8, + argLen: 2, + asm: x86.AVPCMPB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26029,22 +26311,6 @@ var opcodeTable = [...]opInfo{ }, 
}, }, - { - name: "VPCMPB256", - auxType: auxInt8, - argLen: 2, - commutative: true, - asm: x86.AVPCMPB, - reg: regInfo{ - inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - }, - outputs: []outputInfo{ - {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - }, - }, - }, { name: "VPCMPB512", auxType: auxInt8, @@ -26077,10 +26343,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUW256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUW, + name: "VPCMPUW256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26092,10 +26359,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUWMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUW, + name: "VPCMPUWMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26124,10 +26392,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUWMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUW, + name: "VPCMPUWMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26140,10 +26409,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUW128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUW, + name: "VPCMPUW128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUW, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26172,15 +26442,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUDMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUD, + name: "VPCMPUD512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26188,15 +26458,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUD512", + name: "VPCMPUDMasked512", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26204,10 +26475,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUD128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUD, + name: "VPCMPUD128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26219,10 +26491,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUDMasked128", - auxType: auxInt8, - argLen: 3, - 
asm: x86.AVPCMPUD, + name: "VPCMPUDMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26235,15 +26508,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUDMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUD, + name: "VPCMPUD256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26251,15 +26524,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUD256", + name: "VPCMPUDMasked256", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUD, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26267,10 +26541,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQ128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUQ, + name: "VPCMPUQ128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26282,10 +26557,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUQ, + name: "VPCMPUQMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26298,15 +26574,15 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUQ, + name: "VPCMPUQ256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26314,15 +26590,16 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQ256", + name: "VPCMPUQMasked256", auxType: auxInt8, - argLen: 2, + argLen: 3, commutative: true, asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ - {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 - {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 + {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 + {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 }, outputs: []outputInfo{ {0, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26330,10 +26607,11 @@ var opcodeTable = 
[...]opInfo{ }, }, { - name: "VPCMPUQ512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUQ, + name: "VPCMPUQ512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26345,10 +26623,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUQMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUQ, + name: "VPCMPUQMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUQ, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26361,10 +26640,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB128", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUB, + name: "VPCMPUB128", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26376,10 +26656,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked128", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUB, + name: "VPCMPUBMasked128", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26392,10 +26673,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB256", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUB, + name: "VPCMPUB256", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26407,10 +26689,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked256", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUB, + name: "VPCMPUBMasked256", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 @@ -26423,10 +26706,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUB512", - auxType: auxInt8, - argLen: 2, - asm: x86.AVPCMPUB, + name: "VPCMPUB512", + auxType: auxInt8, + argLen: 2, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 @@ -26438,10 +26722,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "VPCMPUBMasked512", - auxType: auxInt8, - argLen: 3, - asm: x86.AVPCMPUB, + name: "VPCMPUBMasked512", + auxType: auxInt8, + argLen: 3, + commutative: true, + asm: x86.AVPCMPUB, reg: regInfo{ inputs: []inputInfo{ {2, 1090921693184}, // K1 K2 K3 K4 K5 K6 K7 diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 88c90dce82..86fbc988cf 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -554,29 +554,41 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64XORQmodify: return rewriteValueAMD64_OpAMD64XORQmodify(v) case OpAbsoluteInt16x16: - return rewriteValueAMD64_OpAbsoluteInt16x16(v) + v.Op = OpAMD64VPABSW256 + return true case OpAbsoluteInt16x32: - return rewriteValueAMD64_OpAbsoluteInt16x32(v) + v.Op = OpAMD64VPABSW512 + return true case OpAbsoluteInt16x8: - return rewriteValueAMD64_OpAbsoluteInt16x8(v) + v.Op = OpAMD64VPABSW128 + return true case OpAbsoluteInt32x16: - return rewriteValueAMD64_OpAbsoluteInt32x16(v) + v.Op = OpAMD64VPABSD512 + return true case OpAbsoluteInt32x4: - return rewriteValueAMD64_OpAbsoluteInt32x4(v) + v.Op = 
OpAMD64VPABSD128 + return true case OpAbsoluteInt32x8: - return rewriteValueAMD64_OpAbsoluteInt32x8(v) + v.Op = OpAMD64VPABSD256 + return true case OpAbsoluteInt64x2: - return rewriteValueAMD64_OpAbsoluteInt64x2(v) + v.Op = OpAMD64VPABSQ128 + return true case OpAbsoluteInt64x4: - return rewriteValueAMD64_OpAbsoluteInt64x4(v) + v.Op = OpAMD64VPABSQ256 + return true case OpAbsoluteInt64x8: - return rewriteValueAMD64_OpAbsoluteInt64x8(v) + v.Op = OpAMD64VPABSQ512 + return true case OpAbsoluteInt8x16: - return rewriteValueAMD64_OpAbsoluteInt8x16(v) + v.Op = OpAMD64VPABSB128 + return true case OpAbsoluteInt8x32: - return rewriteValueAMD64_OpAbsoluteInt8x32(v) + v.Op = OpAMD64VPABSB256 + return true case OpAbsoluteInt8x64: - return rewriteValueAMD64_OpAbsoluteInt8x64(v) + v.Op = OpAMD64VPABSB512 + return true case OpAdd16: v.Op = OpAMD64ADDL return true @@ -596,68 +608,98 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64ADDL return true case OpAddFloat32x16: - return rewriteValueAMD64_OpAddFloat32x16(v) + v.Op = OpAMD64VADDPS512 + return true case OpAddFloat32x4: - return rewriteValueAMD64_OpAddFloat32x4(v) + v.Op = OpAMD64VADDPS128 + return true case OpAddFloat32x8: - return rewriteValueAMD64_OpAddFloat32x8(v) + v.Op = OpAMD64VADDPS256 + return true case OpAddFloat64x2: - return rewriteValueAMD64_OpAddFloat64x2(v) + v.Op = OpAMD64VADDPD128 + return true case OpAddFloat64x4: - return rewriteValueAMD64_OpAddFloat64x4(v) + v.Op = OpAMD64VADDPD256 + return true case OpAddFloat64x8: - return rewriteValueAMD64_OpAddFloat64x8(v) + v.Op = OpAMD64VADDPD512 + return true case OpAddInt16x16: - return rewriteValueAMD64_OpAddInt16x16(v) + v.Op = OpAMD64VPADDW256 + return true case OpAddInt16x32: - return rewriteValueAMD64_OpAddInt16x32(v) + v.Op = OpAMD64VPADDW512 + return true case OpAddInt16x8: - return rewriteValueAMD64_OpAddInt16x8(v) + v.Op = OpAMD64VPADDW128 + return true case OpAddInt32x16: - return rewriteValueAMD64_OpAddInt32x16(v) + v.Op = OpAMD64VPADDD512 + return true case OpAddInt32x4: - return rewriteValueAMD64_OpAddInt32x4(v) + v.Op = OpAMD64VPADDD128 + return true case OpAddInt32x8: - return rewriteValueAMD64_OpAddInt32x8(v) + v.Op = OpAMD64VPADDD256 + return true case OpAddInt64x2: - return rewriteValueAMD64_OpAddInt64x2(v) + v.Op = OpAMD64VPADDQ128 + return true case OpAddInt64x4: - return rewriteValueAMD64_OpAddInt64x4(v) + v.Op = OpAMD64VPADDQ256 + return true case OpAddInt64x8: - return rewriteValueAMD64_OpAddInt64x8(v) + v.Op = OpAMD64VPADDQ512 + return true case OpAddInt8x16: - return rewriteValueAMD64_OpAddInt8x16(v) + v.Op = OpAMD64VPADDB128 + return true case OpAddInt8x32: - return rewriteValueAMD64_OpAddInt8x32(v) + v.Op = OpAMD64VPADDB256 + return true case OpAddInt8x64: - return rewriteValueAMD64_OpAddInt8x64(v) + v.Op = OpAMD64VPADDB512 + return true case OpAddPtr: v.Op = OpAMD64ADDQ return true case OpAddUint16x16: - return rewriteValueAMD64_OpAddUint16x16(v) + v.Op = OpAMD64VPADDW256 + return true case OpAddUint16x32: - return rewriteValueAMD64_OpAddUint16x32(v) + v.Op = OpAMD64VPADDW512 + return true case OpAddUint16x8: - return rewriteValueAMD64_OpAddUint16x8(v) + v.Op = OpAMD64VPADDW128 + return true case OpAddUint32x16: - return rewriteValueAMD64_OpAddUint32x16(v) + v.Op = OpAMD64VPADDD512 + return true case OpAddUint32x4: - return rewriteValueAMD64_OpAddUint32x4(v) + v.Op = OpAMD64VPADDD128 + return true case OpAddUint32x8: - return rewriteValueAMD64_OpAddUint32x8(v) + v.Op = OpAMD64VPADDD256 + return true case OpAddUint64x2: - return 
rewriteValueAMD64_OpAddUint64x2(v) + v.Op = OpAMD64VPADDQ128 + return true case OpAddUint64x4: - return rewriteValueAMD64_OpAddUint64x4(v) + v.Op = OpAMD64VPADDQ256 + return true case OpAddUint64x8: - return rewriteValueAMD64_OpAddUint64x8(v) + v.Op = OpAMD64VPADDQ512 + return true case OpAddUint8x16: - return rewriteValueAMD64_OpAddUint8x16(v) + v.Op = OpAMD64VPADDB128 + return true case OpAddUint8x32: - return rewriteValueAMD64_OpAddUint8x32(v) + v.Op = OpAMD64VPADDB256 + return true case OpAddUint8x64: - return rewriteValueAMD64_OpAddUint8x64(v) + v.Op = OpAMD64VPADDB512 + return true case OpAddr: return rewriteValueAMD64_OpAddr(v) case OpAnd16: @@ -676,133 +718,197 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64ANDL return true case OpAndFloat32x16: - return rewriteValueAMD64_OpAndFloat32x16(v) + v.Op = OpAMD64VANDPS512 + return true case OpAndFloat32x4: - return rewriteValueAMD64_OpAndFloat32x4(v) + v.Op = OpAMD64VANDPS128 + return true case OpAndFloat32x8: - return rewriteValueAMD64_OpAndFloat32x8(v) + v.Op = OpAMD64VANDPS256 + return true case OpAndFloat64x2: - return rewriteValueAMD64_OpAndFloat64x2(v) + v.Op = OpAMD64VANDPD128 + return true case OpAndFloat64x4: - return rewriteValueAMD64_OpAndFloat64x4(v) + v.Op = OpAMD64VANDPD256 + return true case OpAndFloat64x8: - return rewriteValueAMD64_OpAndFloat64x8(v) + v.Op = OpAMD64VANDPD512 + return true case OpAndInt16x16: - return rewriteValueAMD64_OpAndInt16x16(v) + v.Op = OpAMD64VPAND256 + return true case OpAndInt16x8: - return rewriteValueAMD64_OpAndInt16x8(v) + v.Op = OpAMD64VPAND128 + return true case OpAndInt32x16: - return rewriteValueAMD64_OpAndInt32x16(v) + v.Op = OpAMD64VPANDD512 + return true case OpAndInt32x4: - return rewriteValueAMD64_OpAndInt32x4(v) + v.Op = OpAMD64VPAND128 + return true case OpAndInt32x8: - return rewriteValueAMD64_OpAndInt32x8(v) + v.Op = OpAMD64VPAND256 + return true case OpAndInt64x2: - return rewriteValueAMD64_OpAndInt64x2(v) + v.Op = OpAMD64VPAND128 + return true case OpAndInt64x4: - return rewriteValueAMD64_OpAndInt64x4(v) + v.Op = OpAMD64VPAND256 + return true case OpAndInt64x8: - return rewriteValueAMD64_OpAndInt64x8(v) + v.Op = OpAMD64VPANDQ512 + return true case OpAndInt8x16: - return rewriteValueAMD64_OpAndInt8x16(v) + v.Op = OpAMD64VPAND128 + return true case OpAndInt8x32: - return rewriteValueAMD64_OpAndInt8x32(v) + v.Op = OpAMD64VPAND256 + return true case OpAndNotFloat32x16: - return rewriteValueAMD64_OpAndNotFloat32x16(v) + v.Op = OpAMD64VANDNPS512 + return true case OpAndNotFloat32x4: - return rewriteValueAMD64_OpAndNotFloat32x4(v) + v.Op = OpAMD64VANDNPS128 + return true case OpAndNotFloat32x8: - return rewriteValueAMD64_OpAndNotFloat32x8(v) + v.Op = OpAMD64VANDNPS256 + return true case OpAndNotFloat64x2: - return rewriteValueAMD64_OpAndNotFloat64x2(v) + v.Op = OpAMD64VANDNPD128 + return true case OpAndNotFloat64x4: - return rewriteValueAMD64_OpAndNotFloat64x4(v) + v.Op = OpAMD64VANDNPD256 + return true case OpAndNotFloat64x8: - return rewriteValueAMD64_OpAndNotFloat64x8(v) + v.Op = OpAMD64VANDNPD512 + return true case OpAndNotInt16x16: - return rewriteValueAMD64_OpAndNotInt16x16(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotInt16x8: - return rewriteValueAMD64_OpAndNotInt16x8(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotInt32x16: - return rewriteValueAMD64_OpAndNotInt32x16(v) + v.Op = OpAMD64VPANDND512 + return true case OpAndNotInt32x4: - return rewriteValueAMD64_OpAndNotInt32x4(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotInt32x8: - return 
rewriteValueAMD64_OpAndNotInt32x8(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotInt64x2: - return rewriteValueAMD64_OpAndNotInt64x2(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotInt64x4: - return rewriteValueAMD64_OpAndNotInt64x4(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotInt64x8: - return rewriteValueAMD64_OpAndNotInt64x8(v) + v.Op = OpAMD64VPANDNQ512 + return true case OpAndNotInt8x16: - return rewriteValueAMD64_OpAndNotInt8x16(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotInt8x32: - return rewriteValueAMD64_OpAndNotInt8x32(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotUint16x16: - return rewriteValueAMD64_OpAndNotUint16x16(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotUint16x8: - return rewriteValueAMD64_OpAndNotUint16x8(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotUint32x16: - return rewriteValueAMD64_OpAndNotUint32x16(v) + v.Op = OpAMD64VPANDND512 + return true case OpAndNotUint32x4: - return rewriteValueAMD64_OpAndNotUint32x4(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotUint32x8: - return rewriteValueAMD64_OpAndNotUint32x8(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotUint64x2: - return rewriteValueAMD64_OpAndNotUint64x2(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotUint64x4: - return rewriteValueAMD64_OpAndNotUint64x4(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndNotUint64x8: - return rewriteValueAMD64_OpAndNotUint64x8(v) + v.Op = OpAMD64VPANDNQ512 + return true case OpAndNotUint8x16: - return rewriteValueAMD64_OpAndNotUint8x16(v) + v.Op = OpAMD64VPANDN128 + return true case OpAndNotUint8x32: - return rewriteValueAMD64_OpAndNotUint8x32(v) + v.Op = OpAMD64VPANDN256 + return true case OpAndUint16x16: - return rewriteValueAMD64_OpAndUint16x16(v) + v.Op = OpAMD64VPAND256 + return true case OpAndUint16x8: - return rewriteValueAMD64_OpAndUint16x8(v) + v.Op = OpAMD64VPAND128 + return true case OpAndUint32x16: - return rewriteValueAMD64_OpAndUint32x16(v) + v.Op = OpAMD64VPANDD512 + return true case OpAndUint32x4: - return rewriteValueAMD64_OpAndUint32x4(v) + v.Op = OpAMD64VPAND128 + return true case OpAndUint32x8: - return rewriteValueAMD64_OpAndUint32x8(v) + v.Op = OpAMD64VPAND256 + return true case OpAndUint64x2: - return rewriteValueAMD64_OpAndUint64x2(v) + v.Op = OpAMD64VPAND128 + return true case OpAndUint64x4: - return rewriteValueAMD64_OpAndUint64x4(v) + v.Op = OpAMD64VPAND256 + return true case OpAndUint64x8: - return rewriteValueAMD64_OpAndUint64x8(v) + v.Op = OpAMD64VPANDQ512 + return true case OpAndUint8x16: - return rewriteValueAMD64_OpAndUint8x16(v) + v.Op = OpAMD64VPAND128 + return true case OpAndUint8x32: - return rewriteValueAMD64_OpAndUint8x32(v) + v.Op = OpAMD64VPAND256 + return true case OpApproximateReciprocalFloat32x16: - return rewriteValueAMD64_OpApproximateReciprocalFloat32x16(v) + v.Op = OpAMD64VRCP14PS512 + return true case OpApproximateReciprocalFloat32x4: - return rewriteValueAMD64_OpApproximateReciprocalFloat32x4(v) + v.Op = OpAMD64VRCP14PS128 + return true case OpApproximateReciprocalFloat32x8: - return rewriteValueAMD64_OpApproximateReciprocalFloat32x8(v) + v.Op = OpAMD64VRCP14PS256 + return true case OpApproximateReciprocalFloat64x2: - return rewriteValueAMD64_OpApproximateReciprocalFloat64x2(v) + v.Op = OpAMD64VRCP14PD128 + return true case OpApproximateReciprocalFloat64x4: - return rewriteValueAMD64_OpApproximateReciprocalFloat64x4(v) + v.Op = OpAMD64VRCP14PD256 + return true case OpApproximateReciprocalFloat64x8: - return 
rewriteValueAMD64_OpApproximateReciprocalFloat64x8(v) + v.Op = OpAMD64VRCP14PD512 + return true case OpApproximateReciprocalOfSqrtFloat32x16: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x16(v) + v.Op = OpAMD64VRSQRT14PS512 + return true case OpApproximateReciprocalOfSqrtFloat32x4: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x4(v) + v.Op = OpAMD64VRSQRTPS128 + return true case OpApproximateReciprocalOfSqrtFloat32x8: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x8(v) + v.Op = OpAMD64VRSQRTPS256 + return true case OpApproximateReciprocalOfSqrtFloat64x2: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x2(v) + v.Op = OpAMD64VRSQRT14PD128 + return true case OpApproximateReciprocalOfSqrtFloat64x4: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x4(v) + v.Op = OpAMD64VRSQRT14PD256 + return true case OpApproximateReciprocalOfSqrtFloat64x8: - return rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x8(v) + v.Op = OpAMD64VRSQRT14PD512 + return true case OpAtomicAdd32: return rewriteValueAMD64_OpAtomicAdd32(v) case OpAtomicAdd64: @@ -850,17 +956,23 @@ func rewriteValueAMD64(v *Value) bool { case OpAtomicStorePtrNoWB: return rewriteValueAMD64_OpAtomicStorePtrNoWB(v) case OpAverageUint16x16: - return rewriteValueAMD64_OpAverageUint16x16(v) + v.Op = OpAMD64VPAVGW256 + return true case OpAverageUint16x32: - return rewriteValueAMD64_OpAverageUint16x32(v) + v.Op = OpAMD64VPAVGW512 + return true case OpAverageUint16x8: - return rewriteValueAMD64_OpAverageUint16x8(v) + v.Op = OpAMD64VPAVGW128 + return true case OpAverageUint8x16: - return rewriteValueAMD64_OpAverageUint8x16(v) + v.Op = OpAMD64VPAVGB128 + return true case OpAverageUint8x32: - return rewriteValueAMD64_OpAverageUint8x32(v) + v.Op = OpAMD64VPAVGB256 + return true case OpAverageUint8x64: - return rewriteValueAMD64_OpAverageUint8x64(v) + v.Op = OpAMD64VPAVGB512 + return true case OpAvg64u: v.Op = OpAMD64AVGQU return true @@ -994,17 +1106,23 @@ func rewriteValueAMD64(v *Value) bool { case OpDiv8u: return rewriteValueAMD64_OpDiv8u(v) case OpDivFloat32x16: - return rewriteValueAMD64_OpDivFloat32x16(v) + v.Op = OpAMD64VDIVPS512 + return true case OpDivFloat32x4: - return rewriteValueAMD64_OpDivFloat32x4(v) + v.Op = OpAMD64VDIVPS128 + return true case OpDivFloat32x8: - return rewriteValueAMD64_OpDivFloat32x8(v) + v.Op = OpAMD64VDIVPS256 + return true case OpDivFloat64x2: - return rewriteValueAMD64_OpDivFloat64x2(v) + v.Op = OpAMD64VDIVPD128 + return true case OpDivFloat64x4: - return rewriteValueAMD64_OpDivFloat64x4(v) + v.Op = OpAMD64VDIVPD256 + return true case OpDivFloat64x8: - return rewriteValueAMD64_OpDivFloat64x8(v) + v.Op = OpAMD64VDIVPD512 + return true case OpEq16: return rewriteValueAMD64_OpEq16(v) case OpEq32: @@ -1034,27 +1152,35 @@ func rewriteValueAMD64(v *Value) bool { case OpEqualFloat64x8: return rewriteValueAMD64_OpEqualFloat64x8(v) case OpEqualInt16x16: - return rewriteValueAMD64_OpEqualInt16x16(v) + v.Op = OpAMD64VPCMPEQW256 + return true case OpEqualInt16x32: return rewriteValueAMD64_OpEqualInt16x32(v) case OpEqualInt16x8: - return rewriteValueAMD64_OpEqualInt16x8(v) + v.Op = OpAMD64VPCMPEQW128 + return true case OpEqualInt32x16: return rewriteValueAMD64_OpEqualInt32x16(v) case OpEqualInt32x4: - return rewriteValueAMD64_OpEqualInt32x4(v) + v.Op = OpAMD64VPCMPEQD128 + return true case OpEqualInt32x8: - return rewriteValueAMD64_OpEqualInt32x8(v) + v.Op = OpAMD64VPCMPEQD256 + return true case OpEqualInt64x2: - return 
rewriteValueAMD64_OpEqualInt64x2(v) + v.Op = OpAMD64VPCMPEQQ128 + return true case OpEqualInt64x4: - return rewriteValueAMD64_OpEqualInt64x4(v) + v.Op = OpAMD64VPCMPEQQ256 + return true case OpEqualInt64x8: return rewriteValueAMD64_OpEqualInt64x8(v) case OpEqualInt8x16: - return rewriteValueAMD64_OpEqualInt8x16(v) + v.Op = OpAMD64VPCMPEQB128 + return true case OpEqualInt8x32: - return rewriteValueAMD64_OpEqualInt8x32(v) + v.Op = OpAMD64VPCMPEQB256 + return true case OpEqualInt8x64: return rewriteValueAMD64_OpEqualInt8x64(v) case OpEqualUint16x16: @@ -1169,27 +1295,34 @@ func rewriteValueAMD64(v *Value) bool { case OpGreaterFloat64x8: return rewriteValueAMD64_OpGreaterFloat64x8(v) case OpGreaterInt16x16: - return rewriteValueAMD64_OpGreaterInt16x16(v) + v.Op = OpAMD64VPCMPGTW256 + return true case OpGreaterInt16x32: return rewriteValueAMD64_OpGreaterInt16x32(v) case OpGreaterInt16x8: - return rewriteValueAMD64_OpGreaterInt16x8(v) + v.Op = OpAMD64VPCMPGTW128 + return true case OpGreaterInt32x16: return rewriteValueAMD64_OpGreaterInt32x16(v) case OpGreaterInt32x4: - return rewriteValueAMD64_OpGreaterInt32x4(v) + v.Op = OpAMD64VPCMPGTD128 + return true case OpGreaterInt32x8: - return rewriteValueAMD64_OpGreaterInt32x8(v) + v.Op = OpAMD64VPCMPGTD256 + return true case OpGreaterInt64x2: return rewriteValueAMD64_OpGreaterInt64x2(v) case OpGreaterInt64x4: - return rewriteValueAMD64_OpGreaterInt64x4(v) + v.Op = OpAMD64VPCMPGTQ256 + return true case OpGreaterInt64x8: return rewriteValueAMD64_OpGreaterInt64x8(v) case OpGreaterInt8x16: - return rewriteValueAMD64_OpGreaterInt8x16(v) + v.Op = OpAMD64VPCMPGTB128 + return true case OpGreaterInt8x32: - return rewriteValueAMD64_OpGreaterInt8x32(v) + v.Op = OpAMD64VPCMPGTB256 + return true case OpGreaterInt8x64: return rewriteValueAMD64_OpGreaterInt8x64(v) case OpGreaterUint16x16: @@ -2454,129 +2587,189 @@ func rewriteValueAMD64(v *Value) bool { case OpMax64F: return rewriteValueAMD64_OpMax64F(v) case OpMaxFloat32x16: - return rewriteValueAMD64_OpMaxFloat32x16(v) + v.Op = OpAMD64VMAXPS512 + return true case OpMaxFloat32x4: - return rewriteValueAMD64_OpMaxFloat32x4(v) + v.Op = OpAMD64VMAXPS128 + return true case OpMaxFloat32x8: - return rewriteValueAMD64_OpMaxFloat32x8(v) + v.Op = OpAMD64VMAXPS256 + return true case OpMaxFloat64x2: - return rewriteValueAMD64_OpMaxFloat64x2(v) + v.Op = OpAMD64VMAXPD128 + return true case OpMaxFloat64x4: - return rewriteValueAMD64_OpMaxFloat64x4(v) + v.Op = OpAMD64VMAXPD256 + return true case OpMaxFloat64x8: - return rewriteValueAMD64_OpMaxFloat64x8(v) + v.Op = OpAMD64VMAXPD512 + return true case OpMaxInt16x16: - return rewriteValueAMD64_OpMaxInt16x16(v) + v.Op = OpAMD64VPMAXSW256 + return true case OpMaxInt16x32: - return rewriteValueAMD64_OpMaxInt16x32(v) + v.Op = OpAMD64VPMAXSW512 + return true case OpMaxInt16x8: - return rewriteValueAMD64_OpMaxInt16x8(v) + v.Op = OpAMD64VPMAXSW128 + return true case OpMaxInt32x16: - return rewriteValueAMD64_OpMaxInt32x16(v) + v.Op = OpAMD64VPMAXSD512 + return true case OpMaxInt32x4: - return rewriteValueAMD64_OpMaxInt32x4(v) + v.Op = OpAMD64VPMAXSD128 + return true case OpMaxInt32x8: - return rewriteValueAMD64_OpMaxInt32x8(v) + v.Op = OpAMD64VPMAXSD256 + return true case OpMaxInt64x2: - return rewriteValueAMD64_OpMaxInt64x2(v) + v.Op = OpAMD64VPMAXSQ128 + return true case OpMaxInt64x4: - return rewriteValueAMD64_OpMaxInt64x4(v) + v.Op = OpAMD64VPMAXSQ256 + return true case OpMaxInt64x8: - return rewriteValueAMD64_OpMaxInt64x8(v) + v.Op = OpAMD64VPMAXSQ512 + return true case OpMaxInt8x16: - 
return rewriteValueAMD64_OpMaxInt8x16(v) + v.Op = OpAMD64VPMAXSB128 + return true case OpMaxInt8x32: - return rewriteValueAMD64_OpMaxInt8x32(v) + v.Op = OpAMD64VPMAXSB256 + return true case OpMaxInt8x64: - return rewriteValueAMD64_OpMaxInt8x64(v) + v.Op = OpAMD64VPMAXSB512 + return true case OpMaxUint16x16: - return rewriteValueAMD64_OpMaxUint16x16(v) + v.Op = OpAMD64VPMAXUW256 + return true case OpMaxUint16x32: - return rewriteValueAMD64_OpMaxUint16x32(v) + v.Op = OpAMD64VPMAXUW512 + return true case OpMaxUint16x8: - return rewriteValueAMD64_OpMaxUint16x8(v) + v.Op = OpAMD64VPMAXUW128 + return true case OpMaxUint32x16: - return rewriteValueAMD64_OpMaxUint32x16(v) + v.Op = OpAMD64VPMAXUD512 + return true case OpMaxUint32x4: - return rewriteValueAMD64_OpMaxUint32x4(v) + v.Op = OpAMD64VPMAXUD128 + return true case OpMaxUint32x8: - return rewriteValueAMD64_OpMaxUint32x8(v) + v.Op = OpAMD64VPMAXUD256 + return true case OpMaxUint64x2: - return rewriteValueAMD64_OpMaxUint64x2(v) + v.Op = OpAMD64VPMAXUQ128 + return true case OpMaxUint64x4: - return rewriteValueAMD64_OpMaxUint64x4(v) + v.Op = OpAMD64VPMAXUQ256 + return true case OpMaxUint64x8: - return rewriteValueAMD64_OpMaxUint64x8(v) + v.Op = OpAMD64VPMAXUQ512 + return true case OpMaxUint8x16: - return rewriteValueAMD64_OpMaxUint8x16(v) + v.Op = OpAMD64VPMAXUB128 + return true case OpMaxUint8x32: - return rewriteValueAMD64_OpMaxUint8x32(v) + v.Op = OpAMD64VPMAXUB256 + return true case OpMaxUint8x64: - return rewriteValueAMD64_OpMaxUint8x64(v) + v.Op = OpAMD64VPMAXUB512 + return true case OpMin32F: return rewriteValueAMD64_OpMin32F(v) case OpMin64F: return rewriteValueAMD64_OpMin64F(v) case OpMinFloat32x16: - return rewriteValueAMD64_OpMinFloat32x16(v) + v.Op = OpAMD64VMINPS512 + return true case OpMinFloat32x4: - return rewriteValueAMD64_OpMinFloat32x4(v) + v.Op = OpAMD64VMINPS128 + return true case OpMinFloat32x8: - return rewriteValueAMD64_OpMinFloat32x8(v) + v.Op = OpAMD64VMINPS256 + return true case OpMinFloat64x2: - return rewriteValueAMD64_OpMinFloat64x2(v) + v.Op = OpAMD64VMINPD128 + return true case OpMinFloat64x4: - return rewriteValueAMD64_OpMinFloat64x4(v) + v.Op = OpAMD64VMINPD256 + return true case OpMinFloat64x8: - return rewriteValueAMD64_OpMinFloat64x8(v) + v.Op = OpAMD64VMINPD512 + return true case OpMinInt16x16: - return rewriteValueAMD64_OpMinInt16x16(v) + v.Op = OpAMD64VPMINSW256 + return true case OpMinInt16x32: - return rewriteValueAMD64_OpMinInt16x32(v) + v.Op = OpAMD64VPMINSW512 + return true case OpMinInt16x8: - return rewriteValueAMD64_OpMinInt16x8(v) + v.Op = OpAMD64VPMINSW128 + return true case OpMinInt32x16: - return rewriteValueAMD64_OpMinInt32x16(v) + v.Op = OpAMD64VPMINSD512 + return true case OpMinInt32x4: - return rewriteValueAMD64_OpMinInt32x4(v) + v.Op = OpAMD64VPMINSD128 + return true case OpMinInt32x8: - return rewriteValueAMD64_OpMinInt32x8(v) + v.Op = OpAMD64VPMINSD256 + return true case OpMinInt64x2: - return rewriteValueAMD64_OpMinInt64x2(v) + v.Op = OpAMD64VPMINSQ128 + return true case OpMinInt64x4: - return rewriteValueAMD64_OpMinInt64x4(v) + v.Op = OpAMD64VPMINSQ256 + return true case OpMinInt64x8: - return rewriteValueAMD64_OpMinInt64x8(v) + v.Op = OpAMD64VPMINSQ512 + return true case OpMinInt8x16: - return rewriteValueAMD64_OpMinInt8x16(v) + v.Op = OpAMD64VPMINSB128 + return true case OpMinInt8x32: - return rewriteValueAMD64_OpMinInt8x32(v) + v.Op = OpAMD64VPMINSB256 + return true case OpMinInt8x64: - return rewriteValueAMD64_OpMinInt8x64(v) + v.Op = OpAMD64VPMINSB512 + return true case 
OpMinUint16x16: - return rewriteValueAMD64_OpMinUint16x16(v) + v.Op = OpAMD64VPMINUW256 + return true case OpMinUint16x32: - return rewriteValueAMD64_OpMinUint16x32(v) + v.Op = OpAMD64VPMINUW512 + return true case OpMinUint16x8: - return rewriteValueAMD64_OpMinUint16x8(v) + v.Op = OpAMD64VPMINUW128 + return true case OpMinUint32x16: - return rewriteValueAMD64_OpMinUint32x16(v) + v.Op = OpAMD64VPMINUD512 + return true case OpMinUint32x4: - return rewriteValueAMD64_OpMinUint32x4(v) + v.Op = OpAMD64VPMINUD128 + return true case OpMinUint32x8: - return rewriteValueAMD64_OpMinUint32x8(v) + v.Op = OpAMD64VPMINUD256 + return true case OpMinUint64x2: - return rewriteValueAMD64_OpMinUint64x2(v) + v.Op = OpAMD64VPMINUQ128 + return true case OpMinUint64x4: - return rewriteValueAMD64_OpMinUint64x4(v) + v.Op = OpAMD64VPMINUQ256 + return true case OpMinUint64x8: - return rewriteValueAMD64_OpMinUint64x8(v) + v.Op = OpAMD64VPMINUQ512 + return true case OpMinUint8x16: - return rewriteValueAMD64_OpMinUint8x16(v) + v.Op = OpAMD64VPMINUB128 + return true case OpMinUint8x32: - return rewriteValueAMD64_OpMinUint8x32(v) + v.Op = OpAMD64VPMINUB256 + return true case OpMinUint8x64: - return rewriteValueAMD64_OpMinUint8x64(v) + v.Op = OpAMD64VPMINUB512 + return true case OpMod16: return rewriteValueAMD64_OpMod16(v) case OpMod16u: @@ -2617,79 +2810,116 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64MULL return true case OpMulByPowOf2Float32x16: - return rewriteValueAMD64_OpMulByPowOf2Float32x16(v) + v.Op = OpAMD64VSCALEFPS512 + return true case OpMulByPowOf2Float32x4: - return rewriteValueAMD64_OpMulByPowOf2Float32x4(v) + v.Op = OpAMD64VSCALEFPS128 + return true case OpMulByPowOf2Float32x8: - return rewriteValueAMD64_OpMulByPowOf2Float32x8(v) + v.Op = OpAMD64VSCALEFPS256 + return true case OpMulByPowOf2Float64x2: - return rewriteValueAMD64_OpMulByPowOf2Float64x2(v) + v.Op = OpAMD64VSCALEFPD128 + return true case OpMulByPowOf2Float64x4: - return rewriteValueAMD64_OpMulByPowOf2Float64x4(v) + v.Op = OpAMD64VSCALEFPD256 + return true case OpMulByPowOf2Float64x8: - return rewriteValueAMD64_OpMulByPowOf2Float64x8(v) + v.Op = OpAMD64VSCALEFPD512 + return true case OpMulEvenWidenInt32x4: - return rewriteValueAMD64_OpMulEvenWidenInt32x4(v) + v.Op = OpAMD64VPMULDQ128 + return true case OpMulEvenWidenInt32x8: - return rewriteValueAMD64_OpMulEvenWidenInt32x8(v) + v.Op = OpAMD64VPMULDQ256 + return true case OpMulEvenWidenInt64x2: - return rewriteValueAMD64_OpMulEvenWidenInt64x2(v) + v.Op = OpAMD64VPMULDQ128 + return true case OpMulEvenWidenInt64x4: - return rewriteValueAMD64_OpMulEvenWidenInt64x4(v) + v.Op = OpAMD64VPMULDQ256 + return true case OpMulEvenWidenInt64x8: - return rewriteValueAMD64_OpMulEvenWidenInt64x8(v) + v.Op = OpAMD64VPMULDQ512 + return true case OpMulEvenWidenUint32x4: - return rewriteValueAMD64_OpMulEvenWidenUint32x4(v) + v.Op = OpAMD64VPMULUDQ128 + return true case OpMulEvenWidenUint32x8: - return rewriteValueAMD64_OpMulEvenWidenUint32x8(v) + v.Op = OpAMD64VPMULUDQ256 + return true case OpMulEvenWidenUint64x2: - return rewriteValueAMD64_OpMulEvenWidenUint64x2(v) + v.Op = OpAMD64VPMULUDQ128 + return true case OpMulEvenWidenUint64x4: - return rewriteValueAMD64_OpMulEvenWidenUint64x4(v) + v.Op = OpAMD64VPMULUDQ256 + return true case OpMulEvenWidenUint64x8: - return rewriteValueAMD64_OpMulEvenWidenUint64x8(v) + v.Op = OpAMD64VPMULUDQ512 + return true case OpMulFloat32x16: - return rewriteValueAMD64_OpMulFloat32x16(v) + v.Op = OpAMD64VMULPS512 + return true case OpMulFloat32x4: - return 
rewriteValueAMD64_OpMulFloat32x4(v) + v.Op = OpAMD64VMULPS128 + return true case OpMulFloat32x8: - return rewriteValueAMD64_OpMulFloat32x8(v) + v.Op = OpAMD64VMULPS256 + return true case OpMulFloat64x2: - return rewriteValueAMD64_OpMulFloat64x2(v) + v.Op = OpAMD64VMULPD128 + return true case OpMulFloat64x4: - return rewriteValueAMD64_OpMulFloat64x4(v) + v.Op = OpAMD64VMULPD256 + return true case OpMulFloat64x8: - return rewriteValueAMD64_OpMulFloat64x8(v) + v.Op = OpAMD64VMULPD512 + return true case OpMulHighInt16x16: - return rewriteValueAMD64_OpMulHighInt16x16(v) + v.Op = OpAMD64VPMULHW256 + return true case OpMulHighInt16x32: - return rewriteValueAMD64_OpMulHighInt16x32(v) + v.Op = OpAMD64VPMULHW512 + return true case OpMulHighInt16x8: - return rewriteValueAMD64_OpMulHighInt16x8(v) + v.Op = OpAMD64VPMULHW128 + return true case OpMulHighUint16x16: - return rewriteValueAMD64_OpMulHighUint16x16(v) + v.Op = OpAMD64VPMULHUW256 + return true case OpMulHighUint16x32: - return rewriteValueAMD64_OpMulHighUint16x32(v) + v.Op = OpAMD64VPMULHUW512 + return true case OpMulHighUint16x8: - return rewriteValueAMD64_OpMulHighUint16x8(v) + v.Op = OpAMD64VPMULHUW128 + return true case OpMulLowInt16x16: - return rewriteValueAMD64_OpMulLowInt16x16(v) + v.Op = OpAMD64VPMULLW256 + return true case OpMulLowInt16x32: - return rewriteValueAMD64_OpMulLowInt16x32(v) + v.Op = OpAMD64VPMULLW512 + return true case OpMulLowInt16x8: - return rewriteValueAMD64_OpMulLowInt16x8(v) + v.Op = OpAMD64VPMULLW128 + return true case OpMulLowInt32x16: - return rewriteValueAMD64_OpMulLowInt32x16(v) + v.Op = OpAMD64VPMULLD512 + return true case OpMulLowInt32x4: - return rewriteValueAMD64_OpMulLowInt32x4(v) + v.Op = OpAMD64VPMULLD128 + return true case OpMulLowInt32x8: - return rewriteValueAMD64_OpMulLowInt32x8(v) + v.Op = OpAMD64VPMULLD256 + return true case OpMulLowInt64x2: - return rewriteValueAMD64_OpMulLowInt64x2(v) + v.Op = OpAMD64VPMULLQ128 + return true case OpMulLowInt64x4: - return rewriteValueAMD64_OpMulLowInt64x4(v) + v.Op = OpAMD64VPMULLQ256 + return true case OpMulLowInt64x8: - return rewriteValueAMD64_OpMulLowInt64x8(v) + v.Op = OpAMD64VPMULLQ512 + return true case OpNeg16: v.Op = OpAMD64NEGL return true @@ -2805,105 +3035,155 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64ORL return true case OpOrFloat32x16: - return rewriteValueAMD64_OpOrFloat32x16(v) + v.Op = OpAMD64VORPS512 + return true case OpOrFloat32x4: - return rewriteValueAMD64_OpOrFloat32x4(v) + v.Op = OpAMD64VORPS128 + return true case OpOrFloat32x8: - return rewriteValueAMD64_OpOrFloat32x8(v) + v.Op = OpAMD64VORPS256 + return true case OpOrFloat64x2: - return rewriteValueAMD64_OpOrFloat64x2(v) + v.Op = OpAMD64VORPD128 + return true case OpOrFloat64x4: - return rewriteValueAMD64_OpOrFloat64x4(v) + v.Op = OpAMD64VORPD256 + return true case OpOrFloat64x8: - return rewriteValueAMD64_OpOrFloat64x8(v) + v.Op = OpAMD64VORPD512 + return true case OpOrInt16x16: - return rewriteValueAMD64_OpOrInt16x16(v) + v.Op = OpAMD64VPOR256 + return true case OpOrInt16x8: - return rewriteValueAMD64_OpOrInt16x8(v) + v.Op = OpAMD64VPOR128 + return true case OpOrInt32x16: - return rewriteValueAMD64_OpOrInt32x16(v) + v.Op = OpAMD64VPORD512 + return true case OpOrInt32x4: - return rewriteValueAMD64_OpOrInt32x4(v) + v.Op = OpAMD64VPOR128 + return true case OpOrInt32x8: - return rewriteValueAMD64_OpOrInt32x8(v) + v.Op = OpAMD64VPOR256 + return true case OpOrInt64x2: - return rewriteValueAMD64_OpOrInt64x2(v) + v.Op = OpAMD64VPOR128 + return true case OpOrInt64x4: - return 
rewriteValueAMD64_OpOrInt64x4(v) + v.Op = OpAMD64VPOR256 + return true case OpOrInt64x8: - return rewriteValueAMD64_OpOrInt64x8(v) + v.Op = OpAMD64VPORQ512 + return true case OpOrInt8x16: - return rewriteValueAMD64_OpOrInt8x16(v) + v.Op = OpAMD64VPOR128 + return true case OpOrInt8x32: - return rewriteValueAMD64_OpOrInt8x32(v) + v.Op = OpAMD64VPOR256 + return true case OpOrUint16x16: - return rewriteValueAMD64_OpOrUint16x16(v) + v.Op = OpAMD64VPOR256 + return true case OpOrUint16x8: - return rewriteValueAMD64_OpOrUint16x8(v) + v.Op = OpAMD64VPOR128 + return true case OpOrUint32x16: - return rewriteValueAMD64_OpOrUint32x16(v) + v.Op = OpAMD64VPORD512 + return true case OpOrUint32x4: - return rewriteValueAMD64_OpOrUint32x4(v) + v.Op = OpAMD64VPOR128 + return true case OpOrUint32x8: - return rewriteValueAMD64_OpOrUint32x8(v) + v.Op = OpAMD64VPOR256 + return true case OpOrUint64x2: - return rewriteValueAMD64_OpOrUint64x2(v) + v.Op = OpAMD64VPOR128 + return true case OpOrUint64x4: - return rewriteValueAMD64_OpOrUint64x4(v) + v.Op = OpAMD64VPOR256 + return true case OpOrUint64x8: - return rewriteValueAMD64_OpOrUint64x8(v) + v.Op = OpAMD64VPORQ512 + return true case OpOrUint8x16: - return rewriteValueAMD64_OpOrUint8x16(v) + v.Op = OpAMD64VPOR128 + return true case OpOrUint8x32: - return rewriteValueAMD64_OpOrUint8x32(v) + v.Op = OpAMD64VPOR256 + return true case OpPairwiseAddFloat32x4: - return rewriteValueAMD64_OpPairwiseAddFloat32x4(v) + v.Op = OpAMD64VHADDPS128 + return true case OpPairwiseAddFloat32x8: - return rewriteValueAMD64_OpPairwiseAddFloat32x8(v) + v.Op = OpAMD64VHADDPS256 + return true case OpPairwiseAddFloat64x2: - return rewriteValueAMD64_OpPairwiseAddFloat64x2(v) + v.Op = OpAMD64VHADDPD128 + return true case OpPairwiseAddFloat64x4: - return rewriteValueAMD64_OpPairwiseAddFloat64x4(v) + v.Op = OpAMD64VHADDPD256 + return true case OpPairwiseAddInt16x16: - return rewriteValueAMD64_OpPairwiseAddInt16x16(v) + v.Op = OpAMD64VPHADDW256 + return true case OpPairwiseAddInt16x8: - return rewriteValueAMD64_OpPairwiseAddInt16x8(v) + v.Op = OpAMD64VPHADDW128 + return true case OpPairwiseAddInt32x4: - return rewriteValueAMD64_OpPairwiseAddInt32x4(v) + v.Op = OpAMD64VPHADDD128 + return true case OpPairwiseAddInt32x8: - return rewriteValueAMD64_OpPairwiseAddInt32x8(v) + v.Op = OpAMD64VPHADDD256 + return true case OpPairwiseAddUint16x16: - return rewriteValueAMD64_OpPairwiseAddUint16x16(v) + v.Op = OpAMD64VPHADDW256 + return true case OpPairwiseAddUint16x8: - return rewriteValueAMD64_OpPairwiseAddUint16x8(v) + v.Op = OpAMD64VPHADDW128 + return true case OpPairwiseAddUint32x4: - return rewriteValueAMD64_OpPairwiseAddUint32x4(v) + v.Op = OpAMD64VPHADDD128 + return true case OpPairwiseAddUint32x8: - return rewriteValueAMD64_OpPairwiseAddUint32x8(v) + v.Op = OpAMD64VPHADDD256 + return true case OpPairwiseSubFloat32x4: - return rewriteValueAMD64_OpPairwiseSubFloat32x4(v) + v.Op = OpAMD64VHSUBPS128 + return true case OpPairwiseSubFloat32x8: - return rewriteValueAMD64_OpPairwiseSubFloat32x8(v) + v.Op = OpAMD64VHSUBPS256 + return true case OpPairwiseSubFloat64x2: - return rewriteValueAMD64_OpPairwiseSubFloat64x2(v) + v.Op = OpAMD64VHSUBPD128 + return true case OpPairwiseSubFloat64x4: - return rewriteValueAMD64_OpPairwiseSubFloat64x4(v) + v.Op = OpAMD64VHSUBPD256 + return true case OpPairwiseSubInt16x16: - return rewriteValueAMD64_OpPairwiseSubInt16x16(v) + v.Op = OpAMD64VPHSUBW256 + return true case OpPairwiseSubInt16x8: - return rewriteValueAMD64_OpPairwiseSubInt16x8(v) + v.Op = OpAMD64VPHSUBW128 + return 
true case OpPairwiseSubInt32x4: - return rewriteValueAMD64_OpPairwiseSubInt32x4(v) + v.Op = OpAMD64VPHSUBD128 + return true case OpPairwiseSubInt32x8: - return rewriteValueAMD64_OpPairwiseSubInt32x8(v) + v.Op = OpAMD64VPHSUBD256 + return true case OpPairwiseSubUint16x16: - return rewriteValueAMD64_OpPairwiseSubUint16x16(v) + v.Op = OpAMD64VPHSUBW256 + return true case OpPairwiseSubUint16x8: - return rewriteValueAMD64_OpPairwiseSubUint16x8(v) + v.Op = OpAMD64VPHSUBW128 + return true case OpPairwiseSubUint32x4: - return rewriteValueAMD64_OpPairwiseSubUint32x4(v) + v.Op = OpAMD64VPHSUBD128 + return true case OpPairwiseSubUint32x8: - return rewriteValueAMD64_OpPairwiseSubUint32x8(v) + v.Op = OpAMD64VPHSUBD256 + return true case OpPanicBounds: return rewriteValueAMD64_OpPanicBounds(v) case OpPopCount16: @@ -2917,53 +3197,77 @@ func rewriteValueAMD64(v *Value) bool { case OpPopCount8: return rewriteValueAMD64_OpPopCount8(v) case OpPopCountInt16x16: - return rewriteValueAMD64_OpPopCountInt16x16(v) + v.Op = OpAMD64VPOPCNTW256 + return true case OpPopCountInt16x32: - return rewriteValueAMD64_OpPopCountInt16x32(v) + v.Op = OpAMD64VPOPCNTW512 + return true case OpPopCountInt16x8: - return rewriteValueAMD64_OpPopCountInt16x8(v) + v.Op = OpAMD64VPOPCNTW128 + return true case OpPopCountInt32x16: - return rewriteValueAMD64_OpPopCountInt32x16(v) + v.Op = OpAMD64VPOPCNTD512 + return true case OpPopCountInt32x4: - return rewriteValueAMD64_OpPopCountInt32x4(v) + v.Op = OpAMD64VPOPCNTD128 + return true case OpPopCountInt32x8: - return rewriteValueAMD64_OpPopCountInt32x8(v) + v.Op = OpAMD64VPOPCNTD256 + return true case OpPopCountInt64x2: - return rewriteValueAMD64_OpPopCountInt64x2(v) + v.Op = OpAMD64VPOPCNTQ128 + return true case OpPopCountInt64x4: - return rewriteValueAMD64_OpPopCountInt64x4(v) + v.Op = OpAMD64VPOPCNTQ256 + return true case OpPopCountInt64x8: - return rewriteValueAMD64_OpPopCountInt64x8(v) + v.Op = OpAMD64VPOPCNTQ512 + return true case OpPopCountInt8x16: - return rewriteValueAMD64_OpPopCountInt8x16(v) + v.Op = OpAMD64VPOPCNTB128 + return true case OpPopCountInt8x32: - return rewriteValueAMD64_OpPopCountInt8x32(v) + v.Op = OpAMD64VPOPCNTB256 + return true case OpPopCountInt8x64: - return rewriteValueAMD64_OpPopCountInt8x64(v) + v.Op = OpAMD64VPOPCNTB512 + return true case OpPopCountUint16x16: - return rewriteValueAMD64_OpPopCountUint16x16(v) + v.Op = OpAMD64VPOPCNTW256 + return true case OpPopCountUint16x32: - return rewriteValueAMD64_OpPopCountUint16x32(v) + v.Op = OpAMD64VPOPCNTW512 + return true case OpPopCountUint16x8: - return rewriteValueAMD64_OpPopCountUint16x8(v) + v.Op = OpAMD64VPOPCNTW128 + return true case OpPopCountUint32x16: - return rewriteValueAMD64_OpPopCountUint32x16(v) + v.Op = OpAMD64VPOPCNTD512 + return true case OpPopCountUint32x4: - return rewriteValueAMD64_OpPopCountUint32x4(v) + v.Op = OpAMD64VPOPCNTD128 + return true case OpPopCountUint32x8: - return rewriteValueAMD64_OpPopCountUint32x8(v) + v.Op = OpAMD64VPOPCNTD256 + return true case OpPopCountUint64x2: - return rewriteValueAMD64_OpPopCountUint64x2(v) + v.Op = OpAMD64VPOPCNTQ128 + return true case OpPopCountUint64x4: - return rewriteValueAMD64_OpPopCountUint64x4(v) + v.Op = OpAMD64VPOPCNTQ256 + return true case OpPopCountUint64x8: - return rewriteValueAMD64_OpPopCountUint64x8(v) + v.Op = OpAMD64VPOPCNTQ512 + return true case OpPopCountUint8x16: - return rewriteValueAMD64_OpPopCountUint8x16(v) + v.Op = OpAMD64VPOPCNTB128 + return true case OpPopCountUint8x32: - return rewriteValueAMD64_OpPopCountUint8x32(v) + v.Op = 
OpAMD64VPOPCNTB256 + return true case OpPopCountUint8x64: - return rewriteValueAMD64_OpPopCountUint8x64(v) + v.Op = OpAMD64VPOPCNTB512 + return true case OpPrefetchCache: v.Op = OpAMD64PrefetchT0 return true @@ -3055,61 +3359,89 @@ func rewriteValueAMD64(v *Value) bool { case OpRsh8x8: return rewriteValueAMD64_OpRsh8x8(v) case OpSaturatedAddInt16x16: - return rewriteValueAMD64_OpSaturatedAddInt16x16(v) + v.Op = OpAMD64VPADDSW256 + return true case OpSaturatedAddInt16x32: - return rewriteValueAMD64_OpSaturatedAddInt16x32(v) + v.Op = OpAMD64VPADDSW512 + return true case OpSaturatedAddInt16x8: - return rewriteValueAMD64_OpSaturatedAddInt16x8(v) + v.Op = OpAMD64VPADDSW128 + return true case OpSaturatedAddInt8x16: - return rewriteValueAMD64_OpSaturatedAddInt8x16(v) + v.Op = OpAMD64VPADDSB128 + return true case OpSaturatedAddInt8x32: - return rewriteValueAMD64_OpSaturatedAddInt8x32(v) + v.Op = OpAMD64VPADDSB256 + return true case OpSaturatedAddInt8x64: - return rewriteValueAMD64_OpSaturatedAddInt8x64(v) + v.Op = OpAMD64VPADDSB512 + return true case OpSaturatedAddUint16x16: - return rewriteValueAMD64_OpSaturatedAddUint16x16(v) + v.Op = OpAMD64VPADDSW256 + return true case OpSaturatedAddUint16x32: - return rewriteValueAMD64_OpSaturatedAddUint16x32(v) + v.Op = OpAMD64VPADDSW512 + return true case OpSaturatedAddUint16x8: - return rewriteValueAMD64_OpSaturatedAddUint16x8(v) + v.Op = OpAMD64VPADDSW128 + return true case OpSaturatedAddUint8x16: - return rewriteValueAMD64_OpSaturatedAddUint8x16(v) + v.Op = OpAMD64VPADDSB128 + return true case OpSaturatedAddUint8x32: - return rewriteValueAMD64_OpSaturatedAddUint8x32(v) + v.Op = OpAMD64VPADDSB256 + return true case OpSaturatedAddUint8x64: - return rewriteValueAMD64_OpSaturatedAddUint8x64(v) + v.Op = OpAMD64VPADDSB512 + return true case OpSaturatedPairwiseAddInt16x16: - return rewriteValueAMD64_OpSaturatedPairwiseAddInt16x16(v) + v.Op = OpAMD64VPHADDSW256 + return true case OpSaturatedPairwiseAddInt16x8: - return rewriteValueAMD64_OpSaturatedPairwiseAddInt16x8(v) + v.Op = OpAMD64VPHADDSW128 + return true case OpSaturatedPairwiseSubInt16x16: - return rewriteValueAMD64_OpSaturatedPairwiseSubInt16x16(v) + v.Op = OpAMD64VPHSUBSW256 + return true case OpSaturatedPairwiseSubInt16x8: - return rewriteValueAMD64_OpSaturatedPairwiseSubInt16x8(v) + v.Op = OpAMD64VPHSUBSW128 + return true case OpSaturatedSubInt16x16: - return rewriteValueAMD64_OpSaturatedSubInt16x16(v) + v.Op = OpAMD64VPSUBSW256 + return true case OpSaturatedSubInt16x32: - return rewriteValueAMD64_OpSaturatedSubInt16x32(v) + v.Op = OpAMD64VPSUBSW512 + return true case OpSaturatedSubInt16x8: - return rewriteValueAMD64_OpSaturatedSubInt16x8(v) + v.Op = OpAMD64VPSUBSW128 + return true case OpSaturatedSubInt8x16: - return rewriteValueAMD64_OpSaturatedSubInt8x16(v) + v.Op = OpAMD64VPSUBSB128 + return true case OpSaturatedSubInt8x32: - return rewriteValueAMD64_OpSaturatedSubInt8x32(v) + v.Op = OpAMD64VPSUBSB256 + return true case OpSaturatedSubInt8x64: - return rewriteValueAMD64_OpSaturatedSubInt8x64(v) + v.Op = OpAMD64VPSUBSB512 + return true case OpSaturatedSubUint16x16: - return rewriteValueAMD64_OpSaturatedSubUint16x16(v) + v.Op = OpAMD64VPSUBSW256 + return true case OpSaturatedSubUint16x32: - return rewriteValueAMD64_OpSaturatedSubUint16x32(v) + v.Op = OpAMD64VPSUBSW512 + return true case OpSaturatedSubUint16x8: - return rewriteValueAMD64_OpSaturatedSubUint16x8(v) + v.Op = OpAMD64VPSUBSW128 + return true case OpSaturatedSubUint8x16: - return rewriteValueAMD64_OpSaturatedSubUint8x16(v) + v.Op = 
OpAMD64VPSUBSB128 + return true case OpSaturatedSubUint8x32: - return rewriteValueAMD64_OpSaturatedSubUint8x32(v) + v.Op = OpAMD64VPSUBSB256 + return true case OpSaturatedSubUint8x64: - return rewriteValueAMD64_OpSaturatedSubUint8x64(v) + v.Op = OpAMD64VPSUBSB512 + return true case OpSelect0: return rewriteValueAMD64_OpSelect0(v) case OpSelect1: @@ -3135,17 +3467,23 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64MOVBQSX return true case OpSignInt16x16: - return rewriteValueAMD64_OpSignInt16x16(v) + v.Op = OpAMD64VPSIGNW256 + return true case OpSignInt16x8: - return rewriteValueAMD64_OpSignInt16x8(v) + v.Op = OpAMD64VPSIGNW128 + return true case OpSignInt32x4: - return rewriteValueAMD64_OpSignInt32x4(v) + v.Op = OpAMD64VPSIGND128 + return true case OpSignInt32x8: - return rewriteValueAMD64_OpSignInt32x8(v) + v.Op = OpAMD64VPSIGND256 + return true case OpSignInt8x16: - return rewriteValueAMD64_OpSignInt8x16(v) + v.Op = OpAMD64VPSIGNB128 + return true case OpSignInt8x32: - return rewriteValueAMD64_OpSignInt8x32(v) + v.Op = OpAMD64VPSIGNB256 + return true case OpSlicemask: return rewriteValueAMD64_OpSlicemask(v) case OpSpectreIndex: @@ -3159,17 +3497,23 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64SQRTSS return true case OpSqrtFloat32x16: - return rewriteValueAMD64_OpSqrtFloat32x16(v) + v.Op = OpAMD64VSQRTPS512 + return true case OpSqrtFloat32x4: - return rewriteValueAMD64_OpSqrtFloat32x4(v) + v.Op = OpAMD64VSQRTPS128 + return true case OpSqrtFloat32x8: - return rewriteValueAMD64_OpSqrtFloat32x8(v) + v.Op = OpAMD64VSQRTPS256 + return true case OpSqrtFloat64x2: - return rewriteValueAMD64_OpSqrtFloat64x2(v) + v.Op = OpAMD64VSQRTPD128 + return true case OpSqrtFloat64x4: - return rewriteValueAMD64_OpSqrtFloat64x4(v) + v.Op = OpAMD64VSQRTPD256 + return true case OpSqrtFloat64x8: - return rewriteValueAMD64_OpSqrtFloat64x8(v) + v.Op = OpAMD64VSQRTPD512 + return true case OpStaticCall: v.Op = OpAMD64CALLstatic return true @@ -3194,68 +3538,98 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64SUBL return true case OpSubFloat32x16: - return rewriteValueAMD64_OpSubFloat32x16(v) + v.Op = OpAMD64VSUBPS512 + return true case OpSubFloat32x4: - return rewriteValueAMD64_OpSubFloat32x4(v) + v.Op = OpAMD64VSUBPS128 + return true case OpSubFloat32x8: - return rewriteValueAMD64_OpSubFloat32x8(v) + v.Op = OpAMD64VSUBPS256 + return true case OpSubFloat64x2: - return rewriteValueAMD64_OpSubFloat64x2(v) + v.Op = OpAMD64VSUBPD128 + return true case OpSubFloat64x4: - return rewriteValueAMD64_OpSubFloat64x4(v) + v.Op = OpAMD64VSUBPD256 + return true case OpSubFloat64x8: - return rewriteValueAMD64_OpSubFloat64x8(v) + v.Op = OpAMD64VSUBPD512 + return true case OpSubInt16x16: - return rewriteValueAMD64_OpSubInt16x16(v) + v.Op = OpAMD64VPSUBW256 + return true case OpSubInt16x32: - return rewriteValueAMD64_OpSubInt16x32(v) + v.Op = OpAMD64VPSUBW512 + return true case OpSubInt16x8: - return rewriteValueAMD64_OpSubInt16x8(v) + v.Op = OpAMD64VPSUBW128 + return true case OpSubInt32x16: - return rewriteValueAMD64_OpSubInt32x16(v) + v.Op = OpAMD64VPSUBD512 + return true case OpSubInt32x4: - return rewriteValueAMD64_OpSubInt32x4(v) + v.Op = OpAMD64VPSUBD128 + return true case OpSubInt32x8: - return rewriteValueAMD64_OpSubInt32x8(v) + v.Op = OpAMD64VPSUBD256 + return true case OpSubInt64x2: - return rewriteValueAMD64_OpSubInt64x2(v) + v.Op = OpAMD64VPSUBQ128 + return true case OpSubInt64x4: - return rewriteValueAMD64_OpSubInt64x4(v) + v.Op = OpAMD64VPSUBQ256 + return true case OpSubInt64x8: - return
rewriteValueAMD64_OpSubInt64x8(v) + v.Op = OpAMD64VPSUBQ512 + return true case OpSubInt8x16: - return rewriteValueAMD64_OpSubInt8x16(v) + v.Op = OpAMD64VPSUBB128 + return true case OpSubInt8x32: - return rewriteValueAMD64_OpSubInt8x32(v) + v.Op = OpAMD64VPSUBB256 + return true case OpSubInt8x64: - return rewriteValueAMD64_OpSubInt8x64(v) + v.Op = OpAMD64VPSUBB512 + return true case OpSubPtr: v.Op = OpAMD64SUBQ return true case OpSubUint16x16: - return rewriteValueAMD64_OpSubUint16x16(v) + v.Op = OpAMD64VPSUBW256 + return true case OpSubUint16x32: - return rewriteValueAMD64_OpSubUint16x32(v) + v.Op = OpAMD64VPSUBW512 + return true case OpSubUint16x8: - return rewriteValueAMD64_OpSubUint16x8(v) + v.Op = OpAMD64VPSUBW128 + return true case OpSubUint32x16: - return rewriteValueAMD64_OpSubUint32x16(v) + v.Op = OpAMD64VPSUBD512 + return true case OpSubUint32x4: - return rewriteValueAMD64_OpSubUint32x4(v) + v.Op = OpAMD64VPSUBD128 + return true case OpSubUint32x8: - return rewriteValueAMD64_OpSubUint32x8(v) + v.Op = OpAMD64VPSUBD256 + return true case OpSubUint64x2: - return rewriteValueAMD64_OpSubUint64x2(v) + v.Op = OpAMD64VPSUBQ128 + return true case OpSubUint64x4: - return rewriteValueAMD64_OpSubUint64x4(v) + v.Op = OpAMD64VPSUBQ256 + return true case OpSubUint64x8: - return rewriteValueAMD64_OpSubUint64x8(v) + v.Op = OpAMD64VPSUBQ512 + return true case OpSubUint8x16: - return rewriteValueAMD64_OpSubUint8x16(v) + v.Op = OpAMD64VPSUBB128 + return true case OpSubUint8x32: - return rewriteValueAMD64_OpSubUint8x32(v) + v.Op = OpAMD64VPSUBB256 + return true case OpSubUint8x64: - return rewriteValueAMD64_OpSubUint8x64(v) + v.Op = OpAMD64VPSUBB512 + return true case OpTailCall: v.Op = OpAMD64CALLtail return true @@ -3295,57 +3669,83 @@ func rewriteValueAMD64(v *Value) bool { v.Op = OpAMD64XORL return true case OpXorFloat32x16: - return rewriteValueAMD64_OpXorFloat32x16(v) + v.Op = OpAMD64VXORPS512 + return true case OpXorFloat32x4: - return rewriteValueAMD64_OpXorFloat32x4(v) + v.Op = OpAMD64VXORPS128 + return true case OpXorFloat32x8: - return rewriteValueAMD64_OpXorFloat32x8(v) + v.Op = OpAMD64VXORPS256 + return true case OpXorFloat64x2: - return rewriteValueAMD64_OpXorFloat64x2(v) + v.Op = OpAMD64VXORPD128 + return true case OpXorFloat64x4: - return rewriteValueAMD64_OpXorFloat64x4(v) + v.Op = OpAMD64VXORPD256 + return true case OpXorFloat64x8: - return rewriteValueAMD64_OpXorFloat64x8(v) + v.Op = OpAMD64VXORPD512 + return true case OpXorInt16x16: - return rewriteValueAMD64_OpXorInt16x16(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorInt16x8: - return rewriteValueAMD64_OpXorInt16x8(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorInt32x16: - return rewriteValueAMD64_OpXorInt32x16(v) + v.Op = OpAMD64VPXORD512 + return true case OpXorInt32x4: - return rewriteValueAMD64_OpXorInt32x4(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorInt32x8: - return rewriteValueAMD64_OpXorInt32x8(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorInt64x2: - return rewriteValueAMD64_OpXorInt64x2(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorInt64x4: - return rewriteValueAMD64_OpXorInt64x4(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorInt64x8: - return rewriteValueAMD64_OpXorInt64x8(v) + v.Op = OpAMD64VPXORQ512 + return true case OpXorInt8x16: - return rewriteValueAMD64_OpXorInt8x16(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorInt8x32: - return rewriteValueAMD64_OpXorInt8x32(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorUint16x16: - return rewriteValueAMD64_OpXorUint16x16(v) + v.Op 
= OpAMD64VPXOR256 + return true case OpXorUint16x8: - return rewriteValueAMD64_OpXorUint16x8(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorUint32x16: - return rewriteValueAMD64_OpXorUint32x16(v) + v.Op = OpAMD64VPXORD512 + return true case OpXorUint32x4: - return rewriteValueAMD64_OpXorUint32x4(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorUint32x8: - return rewriteValueAMD64_OpXorUint32x8(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorUint64x2: - return rewriteValueAMD64_OpXorUint64x2(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorUint64x4: - return rewriteValueAMD64_OpXorUint64x4(v) + v.Op = OpAMD64VPXOR256 + return true case OpXorUint64x8: - return rewriteValueAMD64_OpXorUint64x8(v) + v.Op = OpAMD64VPXORQ512 + return true case OpXorUint8x16: - return rewriteValueAMD64_OpXorUint8x16(v) + v.Op = OpAMD64VPXOR128 + return true case OpXorUint8x32: - return rewriteValueAMD64_OpXorUint8x32(v) + v.Op = OpAMD64VPXOR256 + return true case OpZero: return rewriteValueAMD64_OpZero(v) case OpZeroExt16to32: @@ -26050,20295 +26450,16687 @@ func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool { } return false } -func rewriteValueAMD64_OpAbsoluteInt16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (AbsoluteInt16x16 x) - // result: (VPABSW256 x) - for { - x := v_0 - v.reset(OpAMD64VPABSW256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpAbsoluteInt16x32(v *Value) bool { - v_0 := v.Args[0] - // match: (AbsoluteInt16x32 x) - // result: (VPABSW512 x) - for { - x := v_0 - v.reset(OpAMD64VPABSW512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpAbsoluteInt16x8(v *Value) bool { - v_0 := v.Args[0] - // match: (AbsoluteInt16x8 x) - // result: (VPABSW128 x) - for { - x := v_0 - v.reset(OpAMD64VPABSW128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpAbsoluteInt32x16(v *Value) bool { +func rewriteValueAMD64_OpAddr(v *Value) bool { v_0 := v.Args[0] - // match: (AbsoluteInt32x16 x) - // result: (VPABSD512 x) + // match: (Addr {sym} base) + // result: (LEAQ {sym} base) for { - x := v_0 - v.reset(OpAMD64VPABSD512) - v.AddArg(x) + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v.AddArg(base) return true } } -func rewriteValueAMD64_OpAbsoluteInt32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt32x4 x) - // result: (VPABSD128 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAdd32 ptr val mem) + // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) for { - x := v_0 - v.reset(OpAMD64VPABSD128) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64AddTupleFirst32) + v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) return true } } -func rewriteValueAMD64_OpAbsoluteInt32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt32x8 x) - // result: (VPABSD256 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicAdd64 ptr val mem) + // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) for { - x := v_0 - v.reset(OpAMD64VPABSD256) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64AddTupleFirst64) + v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg2(val, v0) return true } } -func 
rewriteValueAMD64_OpAbsoluteInt64x2(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt64x2 x) - // result: (VPABSQ128 x) + // match: (AtomicAnd32 ptr val mem) + // result: (ANDLlock ptr val mem) for { - x := v_0 - v.reset(OpAMD64VPABSQ128) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ANDLlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAbsoluteInt64x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt64x4 x) - // result: (VPABSQ256 x) + // match: (AtomicAnd32value ptr val mem) + // result: (LoweredAtomicAnd32 ptr val mem) for { - x := v_0 - v.reset(OpAMD64VPABSQ256) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicAnd32) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAbsoluteInt64x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt64x8 x) - // result: (VPABSQ512 x) + // match: (AtomicAnd64value ptr val mem) + // result: (LoweredAtomicAnd64 ptr val mem) for { - x := v_0 - v.reset(OpAMD64VPABSQ512) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicAnd64) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAbsoluteInt8x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt8x16 x) - // result: (VPABSB128 x) + // match: (AtomicAnd8 ptr val mem) + // result: (ANDBlock ptr val mem) for { - x := v_0 - v.reset(OpAMD64VPABSB128) - v.AddArg(x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ANDBlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAbsoluteInt8x32(v *Value) bool { +func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt8x32 x) - // result: (VPABSB256 x) + // match: (AtomicCompareAndSwap32 ptr old new_ mem) + // result: (CMPXCHGLlock ptr old new_ mem) for { - x := v_0 - v.reset(OpAMD64VPABSB256) - v.AddArg(x) + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpAMD64CMPXCHGLlock) + v.AddArg4(ptr, old, new_, mem) return true } } -func rewriteValueAMD64_OpAbsoluteInt8x64(v *Value) bool { +func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool { + v_3 := v.Args[3] + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AbsoluteInt8x64 x) - // result: (VPABSB512 x) + // match: (AtomicCompareAndSwap64 ptr old new_ mem) + // result: (CMPXCHGQlock ptr old new_ mem) for { - x := v_0 - v.reset(OpAMD64VPABSB512) - v.AddArg(x) + ptr := v_0 + old := v_1 + new_ := v_2 + mem := v_3 + v.reset(OpAMD64CMPXCHGQlock) + v.AddArg4(ptr, old, new_, mem) return true } } -func rewriteValueAMD64_OpAddFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddFloat32x16 x y) - // result: (VADDPS512 y x) + // match: (AtomicExchange32 ptr val mem) + // result: (XCHGL val ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPS512) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGL) + v.AddArg3(val, ptr, mem) return true } } -func 
rewriteValueAMD64_OpAddFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddFloat32x4 x y) - // result: (VADDPS128 y x) + // match: (AtomicExchange64 ptr val mem) + // result: (XCHGQ val ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPS128) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGQ) + v.AddArg3(val, ptr, mem) return true } } -func rewriteValueAMD64_OpAddFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddFloat32x8 x y) - // result: (VADDPS256 y x) + // match: (AtomicExchange8 ptr val mem) + // result: (XCHGB val ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPS256) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64XCHGB) + v.AddArg3(val, ptr, mem) return true } } -func rewriteValueAMD64_OpAddFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddFloat64x2 x y) - // result: (VADDPD128 y x) + // match: (AtomicLoad32 ptr mem) + // result: (MOVLatomicload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPD128) - v.AddArg2(y, x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVLatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpAddFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddFloat64x4 x y) - // result: (VADDPD256 y x) + // match: (AtomicLoad64 ptr mem) + // result: (MOVQatomicload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPD256) - v.AddArg2(y, x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVQatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpAddFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddFloat64x8 x y) - // result: (VADDPD512 y x) + // match: (AtomicLoad8 ptr mem) + // result: (MOVBatomicload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPD512) - v.AddArg2(y, x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVBatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpAddInt16x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt16x16 x y) - // result: (VPADDW256 y x) + // match: (AtomicLoadPtr ptr mem) + // result: (MOVQatomicload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDW256) - v.AddArg2(y, x) + ptr := v_0 + mem := v_1 + v.reset(OpAMD64MOVQatomicload) + v.AddArg2(ptr, mem) return true } } -func rewriteValueAMD64_OpAddInt16x32(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt16x32 x y) - // result: (VPADDW512 y x) + // match: (AtomicOr32 ptr val mem) + // result: (ORLlock ptr val mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDW512) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ORLlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAddInt16x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt16x8 x y) - // result: (VPADDW128 y x) + // match: (AtomicOr32value ptr val mem) + // result: (LoweredAtomicOr32 ptr val mem) for { - 
x := v_0 - y := v_1 - v.reset(OpAMD64VPADDW128) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicOr32) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAddInt32x16(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt32x16 x y) - // result: (VPADDD512 y x) + // match: (AtomicOr64value ptr val mem) + // result: (LoweredAtomicOr64 ptr val mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDD512) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64LoweredAtomicOr64) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAddInt32x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicOr8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt32x4 x y) - // result: (VPADDD128 y x) + // match: (AtomicOr8 ptr val mem) + // result: (ORBlock ptr val mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDD128) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpAMD64ORBlock) + v.AddArg3(ptr, val, mem) return true } } -func rewriteValueAMD64_OpAddInt32x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt32x8 x y) - // result: (VPADDD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore32 ptr val mem) + // result: (Select1 (XCHGL val ptr mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDD256) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAddInt64x2(v *Value) bool { +func rewriteValueAMD64_OpAtomicStore64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt64x2 x y) - // result: (VPADDQ128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore64 ptr val mem) + // result: (Select1 (XCHGQ val ptr mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDQ128) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAddInt64x4(v *Value) bool { +func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt64x4 x y) - // result: (VPADDQ256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStore8 ptr val mem) + // result: (Select1 (XCHGB val ptr mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDQ256) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAddInt64x8(v *Value) bool { +func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddInt64x8 x y) - // result: (VPADDQ512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (AtomicStorePtrNoWB ptr val mem) + // result: (Select1 (XCHGQ val ptr mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDQ512) - v.AddArg2(y, x) + ptr := v_0 + val := v_1 + mem := v_2 + v.reset(OpSelect1) + v0 := 
b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) + v0.AddArg3(val, ptr, mem) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAddInt8x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen16(v *Value) bool { v_0 := v.Args[0] - // match: (AddInt8x16 x y) - // result: (VPADDB128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen16 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPADDB128) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSRL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpAddInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddInt8x32 x y) - // result: (VPADDB256 y x) + // match: (BitLen16 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVWQZX x)))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VPADDB256) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpAddInt8x64(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen32(v *Value) bool { v_0 := v.Args[0] - // match: (AddInt8x64 x y) - // result: (VPADDB512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen32 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPADDB512) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) + v2.AddArg(x) + v1.AddArg2(v2, v2) + v0.AddArg(v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpAddUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint16x16 x y) - // result: (VPADDW256 y x) + // match: (BitLen32 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL x))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VPADDW256) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpAddUint16x32(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen64(v *Value) bool { v_0 := v.Args[0] - // match: (AddUint16x32 x y) - // result: (VPADDW512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen64 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VPADDW512) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = 
int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) + v1 := b.NewValue0(v.Pos, OpSelect0, t) + v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v2.AddArg(x) + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) + v3.AuxInt = int64ToAuxInt(-1) + v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v4.AddArg(v2) + v0.AddArg3(v1, v3, v4) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpAddUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint16x8 x y) - // result: (VPADDW128 y x) + // match: (BitLen64 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-64] (LZCNTQ x))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VPADDW128) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-64) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpAddUint32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBitLen8(v *Value) bool { v_0 := v.Args[0] - // match: (AddUint32x16 x y) - // result: (VPADDD512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (BitLen8 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPADDD512) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSRL) + v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) + v1.AddArg(x) + v0.AddArg2(v1, v1) + v.AddArg(v0) return true } -} -func rewriteValueAMD64_OpAddUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint32x4 x y) - // result: (VPADDD128 y x) + // match: (BitLen8 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVBQZX x)))) for { + t := v.Type x := v_0 - y := v_1 - v.reset(OpAMD64VPADDD128) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) + v0.AuxInt = int32ToAuxInt(-32) + v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) + v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type) + v2.AddArg(x) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpAddUint32x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpBswap16(v *Value) bool { v_0 := v.Args[0] - // match: (AddUint32x8 x y) - // result: (VPADDD256 y x) + // match: (Bswap16 x) + // result: (ROLWconst [8] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPADDD256) - v.AddArg2(y, x) + v.reset(OpAMD64ROLWconst) + v.AuxInt = int8ToAuxInt(8) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddUint64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCeil(v *Value) bool { v_0 := v.Args[0] - // match: (AddUint64x2 x y) - // result: (VPADDQ128 y x) + // match: (Ceil x) + // result: (ROUNDSD [2] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPADDQ128) - v.AddArg2(y, x) + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(2) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpAddUint64x4(v *Value) bool { +func rewriteValueAMD64_OpCondSelect(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AddUint64x4 x y) - // result: (VPADDQ256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: 
(CondSelect x y (SETEQ cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQEQ y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPADDQ256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQEQ) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAddUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint64x8 x y) - // result: (VPADDQ512 y x) + // match: (CondSelect x y (SETNE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPADDQ512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQNE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAddUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint8x16 x y) - // result: (VPADDB128 y x) + // match: (CondSelect x y (SETL cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPADDB128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAddUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint8x32 x y) - // result: (VPADDB256 y x) + // match: (CondSelect x y (SETG cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPADDB256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAddUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddUint8x64 x y) - // result: (VPADDB512 y x) + // match: (CondSelect x y (SETLE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPADDB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpAddr(v *Value) bool { - v_0 := v.Args[0] - // match: (Addr {sym} base) - // result: (LEAQ {sym} base) - for { - sym := auxToSym(v.Aux) - base := v_0 - v.reset(OpAMD64LEAQ) - v.Aux = symToAux(sym) - v.AddArg(base) + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndFloat32x16 x y) - // result: (VANDPS512 y x) + // match: (CondSelect x y (SETGE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDPS512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndFloat32x4 x y) - // result: (VANDPS128 y x) + // match: (CondSelect x y (SETA cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQHI y x cond) for { + t := v.Type x := 
v_0 y := v_1 - v.reset(OpAMD64VANDPS128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQHI) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndFloat32x8 x y) - // result: (VANDPS256 y x) + // match: (CondSelect x y (SETB cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQCS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDPS256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQCS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndFloat64x2 x y) - // result: (VANDPD128 y x) + // match: (CondSelect x y (SETAE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQCC y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDPD128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQCC) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndFloat64x4 x y) - // result: (VANDPD256 y x) + // match: (CondSelect x y (SETBE cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQLS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDPD256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQLS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndFloat64x8 x y) - // result: (VANDPD512 y x) + // match: (CondSelect x y (SETEQF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQEQF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDPD512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQEQF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt16x16 x y) - // result: (VPAND256 y x) + // match: (CondSelect x y (SETNEF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQNEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt16x8 x y) - // result: (VPAND128 y x) + // match: (CondSelect x y (SETGF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGTF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGTF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt32x16 x y) - // result: 
(VPANDD512 y x) + // match: (CondSelect x y (SETGEF cond)) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (CMOVQGEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDD512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64CMOVQGEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt32x4 x y) - // result: (VPAND128 y x) + // match: (CondSelect x y (SETEQ cond)) + // cond: is32BitInt(t) + // result: (CMOVLEQ y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLEQ) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt32x8 x y) - // result: (VPAND256 y x) + // match: (CondSelect x y (SETNE cond)) + // cond: is32BitInt(t) + // result: (CMOVLNE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt64x2 x y) - // result: (VPAND128 y x) + // match: (CondSelect x y (SETL cond)) + // cond: is32BitInt(t) + // result: (CMOVLLT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt64x4 x y) - // result: (VPAND256 y x) + // match: (CondSelect x y (SETG cond)) + // cond: is32BitInt(t) + // result: (CMOVLGT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt64x8 x y) - // result: (VPANDQ512 y x) + // match: (CondSelect x y (SETLE cond)) + // cond: is32BitInt(t) + // result: (CMOVLLE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDQ512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt8x16 x y) - // result: (VPAND128 y x) + // match: (CondSelect x y (SETGE cond)) + // cond: is32BitInt(t) + // result: (CMOVLGE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndInt8x32 x y) - // result: (VPAND256 y x) + // match: (CondSelect 
x y (SETA cond)) + // cond: is32BitInt(t) + // result: (CMOVLHI y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLHI) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotFloat32x16 x y) - // result: (VANDNPS512 y x) + // match: (CondSelect x y (SETB cond)) + // cond: is32BitInt(t) + // result: (CMOVLCS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDNPS512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLCS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotFloat32x4 x y) - // result: (VANDNPS128 y x) + // match: (CondSelect x y (SETAE cond)) + // cond: is32BitInt(t) + // result: (CMOVLCC y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDNPS128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLCC) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotFloat32x8 x y) - // result: (VANDNPS256 y x) + // match: (CondSelect x y (SETBE cond)) + // cond: is32BitInt(t) + // result: (CMOVLLS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDNPS256) - v.AddArg2(y, x) - return true + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLLS) + v.AddArg3(y, x, cond) + return true } -} -func rewriteValueAMD64_OpAndNotFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotFloat64x2 x y) - // result: (VANDNPD128 y x) + // match: (CondSelect x y (SETEQF cond)) + // cond: is32BitInt(t) + // result: (CMOVLEQF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDNPD128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLEQF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotFloat64x4 x y) - // result: (VANDNPD256 y x) + // match: (CondSelect x y (SETNEF cond)) + // cond: is32BitInt(t) + // result: (CMOVLNEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDNPD256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotFloat64x8 x y) - // result: (VANDNPD512 y x) + // match: (CondSelect x y (SETGF cond)) + // cond: is32BitInt(t) + // result: (CMOVLGTF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VANDNPD512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGTF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt16x16 x y) - // 
result: (VPANDN256 y x) + // match: (CondSelect x y (SETGEF cond)) + // cond: is32BitInt(t) + // result: (CMOVLGEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLGEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt16x8 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y (SETEQ cond)) + // cond: is16BitInt(t) + // result: (CMOVWEQ y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETEQ { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWEQ) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt32x16 x y) - // result: (VPANDND512 y x) + // match: (CondSelect x y (SETNE cond)) + // cond: is16BitInt(t) + // result: (CMOVWNE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDND512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETNE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt32x4 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y (SETL cond)) + // cond: is16BitInt(t) + // result: (CMOVWLT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETL { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt32x8 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y (SETG cond)) + // cond: is16BitInt(t) + // result: (CMOVWGT y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETG { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGT) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt64x2 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y (SETLE cond)) + // cond: is16BitInt(t) + // result: (CMOVWLE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETLE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt64x4 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y (SETGE cond)) + // cond: is16BitInt(t) + // result: (CMOVWGE y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGE) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt64x8 x y) - // result: 
(VPANDNQ512 y x) + // match: (CondSelect x y (SETA cond)) + // cond: is16BitInt(t) + // result: (CMOVWHI y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDNQ512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETA { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWHI) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt8x16 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y (SETB cond)) + // cond: is16BitInt(t) + // result: (CMOVWCS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETB { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWCS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotInt8x32 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y (SETAE cond)) + // cond: is16BitInt(t) + // result: (CMOVWCC y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETAE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWCC) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint16x16 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y (SETBE cond)) + // cond: is16BitInt(t) + // result: (CMOVWLS y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETBE { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWLS) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint16x8 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y (SETEQF cond)) + // cond: is16BitInt(t) + // result: (CMOVWEQF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETEQF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWEQF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint32x16 x y) - // result: (VPANDND512 y x) + // match: (CondSelect x y (SETNEF cond)) + // cond: is16BitInt(t) + // result: (CMOVWNEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDND512) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETNEF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint32x4 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y (SETGF cond)) + // cond: is16BitInt(t) + // result: (CMOVWGTF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGTF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint32x8 x y) - 
// result: (VPANDN256 y x) + // match: (CondSelect x y (SETGEF cond)) + // cond: is16BitInt(t) + // result: (CMOVWGEF y x cond) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + if v_2.Op != OpAMD64SETGEF { + break + } + cond := v_2.Args[0] + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWGEF) + v.AddArg3(y, x, cond) return true } -} -func rewriteValueAMD64_OpAndNotUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint64x2 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 1 + // result: (CondSelect x y (MOVBQZX check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 1) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) return true } -} -func rewriteValueAMD64_OpAndNotUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint64x4 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 2 + // result: (CondSelect x y (MOVWQZX check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 2) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) return true } -} -func rewriteValueAMD64_OpAndNotUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint64x8 x y) - // result: (VPANDNQ512 y x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 4 + // result: (CondSelect x y (MOVLQZX check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDNQ512) - v.AddArg2(y, x) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 4) { + break + } + v.reset(OpCondSelect) + v.Type = t + v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) + v0.AddArg(check) + v.AddArg3(x, y, v0) return true } -} -func rewriteValueAMD64_OpAndNotUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint8x16 x y) - // result: (VPANDN128 y x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) + // result: (CMOVQNE y x (CMPQconst [0] check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN128) - v.AddArg2(y, x) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { + break + } + v.reset(OpAMD64CMOVQNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) return true } -} -func rewriteValueAMD64_OpAndNotUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndNotUint8x32 x y) - // result: (VPANDN256 y x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) + // result: (CMOVLNE y x (CMPQconst [0] check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPANDN256) - v.AddArg2(y, x) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { + break + } + v.reset(OpAMD64CMOVLNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = 
int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) return true } -} -func rewriteValueAMD64_OpAndUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint16x16 x y) - // result: (VPAND256 y x) + // match: (CondSelect x y check) + // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) + // result: (CMOVWNE y x (CMPQconst [0] check)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + check := v_2 + if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { + break + } + v.reset(OpAMD64CMOVWNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v0.AddArg(check) + v.AddArg3(y, x, v0) return true } + return false } -func rewriteValueAMD64_OpAndUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint16x8 x y) - // result: (VPAND128 y x) +func rewriteValueAMD64_OpConst16(v *Value) bool { + // match: (Const16 [c]) + // result: (MOVLconst [int32(c)]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + c := auxIntToInt16(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) return true } } -func rewriteValueAMD64_OpAndUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint32x16 x y) - // result: (VPANDD512 y x) +func rewriteValueAMD64_OpConst8(v *Value) bool { + // match: (Const8 [c]) + // result: (MOVLconst [int32(c)]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPANDD512) - v.AddArg2(y, x) + c := auxIntToInt8(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(int32(c)) return true } } -func rewriteValueAMD64_OpAndUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint32x4 x y) - // result: (VPAND128 y x) +func rewriteValueAMD64_OpConstBool(v *Value) bool { + // match: (ConstBool [c]) + // result: (MOVLconst [b2i32(c)]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + c := auxIntToBool(v.AuxInt) + v.reset(OpAMD64MOVLconst) + v.AuxInt = int32ToAuxInt(b2i32(c)) return true } } -func rewriteValueAMD64_OpAndUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint32x8 x y) - // result: (VPAND256 y x) +func rewriteValueAMD64_OpConstNil(v *Value) bool { + // match: (ConstNil ) + // result: (MOVQconst [0]) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + v.reset(OpAMD64MOVQconst) + v.AuxInt = int64ToAuxInt(0) return true } } -func rewriteValueAMD64_OpAndUint64x2(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz16(v *Value) bool { v_0 := v.Args[0] - // match: (AndUint64x2 x y) - // result: (VPAND128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz16 x) + // result: (BSFL (ORLconst [1<<16] x)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) + v.reset(OpAMD64BSFL) + v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1 << 16) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAndUint64x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool { v_0 := v.Args[0] - // match: (AndUint64x4 x y) - // result: (VPAND256 y x) + // match: (Ctz16NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) return true } -} -func 
rewriteValueAMD64_OpAndUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint64x8 x y) - // result: (VPANDQ512 y x) + // match: (Ctz16NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPANDQ512) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) + v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpAndUint8x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpCtz32(v *Value) bool { v_0 := v.Args[0] - // match: (AndUint8x16 x y) - // result: (VPAND128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz32 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPAND128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpAndUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AndUint8x32 x y) - // result: (VPAND256 y x) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) + v.AddArg(x) + return true + } + // match: (Ctz32 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSFQ (BTSQconst [32] x))) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPAND256) - v.AddArg2(y, x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) + v1.AuxInt = int8ToAuxInt(32) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpApproximateReciprocalFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool { v_0 := v.Args[0] - // match: (ApproximateReciprocalFloat32x16 x) - // result: (VRCP14PS512 x) + // match: (Ctz32NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - v.reset(OpAMD64VRCP14PS512) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) v.AddArg(x) return true } -} -func rewriteValueAMD64_OpApproximateReciprocalFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (ApproximateReciprocalFloat32x4 x) - // result: (VRCP14PS128 x) + // match: (Ctz32NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) for { x := v_0 - v.reset(OpAMD64VRCP14PS128) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpApproximateReciprocalFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpCtz64(v *Value) bool { v_0 := v.Args[0] - // match: (ApproximateReciprocalFloat32x8 x) - // result: (VRCP14PS256 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz64 x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTQ x) for { x := v_0 - v.reset(OpAMD64VRCP14PS256) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTQ) v.AddArg(x) return true } -} -func rewriteValueAMD64_OpApproximateReciprocalFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (ApproximateReciprocalFloat64x2 x) - // result: (VRCP14PD128 x) + // match: (Ctz64 x) + // cond: buildcfg.GOAMD64 < 3 + // result: (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) for { + t := v.Type x := v_0 - v.reset(OpAMD64VRCP14PD128) - v.AddArg(x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64CMOVQEQ) + v0 := b.NewValue0(v.Pos, OpSelect0, t) + v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, 
t) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v3.AddArg(v1) + v.AddArg3(v0, v2, v3) return true } + return false } -func rewriteValueAMD64_OpApproximateReciprocalFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool { v_0 := v.Args[0] - // match: (ApproximateReciprocalFloat64x4 x) - // result: (VRCP14PD256 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz64NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTQ x) for { x := v_0 - v.reset(OpAMD64VRCP14PD256) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTQ) v.AddArg(x) return true } -} -func rewriteValueAMD64_OpApproximateReciprocalFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (ApproximateReciprocalFloat64x8 x) - // result: (VRCP14PD512 x) + // match: (Ctz64NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (Select0 (BSFQ x)) for { x := v_0 - v.reset(OpAMD64VRCP14PD512) - v.AddArg(x) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) + v0.AddArg(x) + v.AddArg(v0) return true } + return false } -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpCtz8(v *Value) bool { v_0 := v.Args[0] - // match: (ApproximateReciprocalOfSqrtFloat32x16 x) - // result: (VRSQRT14PS512 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Ctz8 x) + // result: (BSFL (ORLconst [1<<8 ] x)) for { x := v_0 - v.reset(OpAMD64VRSQRT14PS512) - v.AddArg(x) + v.reset(OpAMD64BSFL) + v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) + v0.AuxInt = int32ToAuxInt(1 << 8) + v0.AddArg(x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { v_0 := v.Args[0] - // match: (ApproximateReciprocalOfSqrtFloat32x4 x) - // result: (VRSQRTPS128 x) + // match: (Ctz8NonZero x) + // cond: buildcfg.GOAMD64 >= 3 + // result: (TZCNTL x) for { x := v_0 - v.reset(OpAMD64VRSQRTPS128) + if !(buildcfg.GOAMD64 >= 3) { + break + } + v.reset(OpAMD64TZCNTL) v.AddArg(x) return true } -} -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (ApproximateReciprocalOfSqrtFloat32x8 x) - // result: (VRSQRTPS256 x) + // match: (Ctz8NonZero x) + // cond: buildcfg.GOAMD64 < 3 + // result: (BSFL x) for { x := v_0 - v.reset(OpAMD64VRSQRTPS256) + if !(buildcfg.GOAMD64 < 3) { + break + } + v.reset(OpAMD64BSFL) v.AddArg(x) return true } + return false } -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpDiv16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ApproximateReciprocalOfSqrtFloat64x2 x) - // result: (VRSQRT14PD128 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 [a] x y) + // result: (Select0 (DIVW [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 - v.reset(OpAMD64VRSQRT14PD128) - v.AddArg(x) + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpDiv16u(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ApproximateReciprocalOfSqrtFloat64x4 x) - // result: (VRSQRT14PD256 x) + b := v.Block + typ := 
&b.Func.Config.Types + // match: (Div16u x y) + // result: (Select0 (DIVWU x y)) for { x := v_0 - v.reset(OpAMD64VRSQRT14PD256) - v.AddArg(x) + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpApproximateReciprocalOfSqrtFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpDiv32(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ApproximateReciprocalOfSqrtFloat64x8 x) - // result: (VRSQRT14PD512 x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32 [a] x y) + // result: (Select0 (DIVL [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 - v.reset(OpAMD64VRSQRT14PD512) - v.AddArg(x) + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv32u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (AtomicAdd32 ptr val mem) - // result: (AddTupleFirst32 val (XADDLlock val ptr mem)) + // match: (Div32u x y) + // result: (Select0 (DIVLU x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64AddTupleFirst32) - v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg2(val, v0) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (AtomicAdd64 ptr val mem) - // result: (AddTupleFirst64 val (XADDQlock val ptr mem)) + // match: (Div64 [a] x y) + // result: (Select0 (DIVQ [a] x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64AddTupleFirst64) - v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg3(val, ptr, mem) - v.AddArg2(val, v0) + a := auxIntToBool(v.AuxInt) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv64u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd32 ptr val mem) - // result: (ANDLlock ptr val mem) + b := v.Block + typ := &b.Func.Config.Types + // match: (Div64u x y) + // result: (Select0 (DIVQU x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ANDLlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd32value ptr val mem) - // result: (LoweredAtomicAnd32 ptr val mem) + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) for { - ptr := v_0 - val := v_1 - mem := v_2 - 
v.reset(OpAMD64LoweredAtomicAnd32) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpDiv8u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd64value ptr val mem) - // result: (LoweredAtomicAnd64 ptr val mem) + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicAnd64) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicAnd8 ptr val mem) - // result: (ANDBlock ptr val mem) + b := v.Block + // match: (Eq16 x y) + // result: (SETEQ (CMPW x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ANDBlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicCompareAndSwap32 ptr old new_ mem) - // result: (CMPXCHGLlock ptr old new_ mem) + b := v.Block + // match: (Eq32 x y) + // result: (SETEQ (CMPL x y)) for { - ptr := v_0 - old := v_1 - new_ := v_2 - mem := v_3 - v.reset(OpAMD64CMPXCHGLlock) - v.AddArg4(ptr, old, new_, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool { - v_3 := v.Args[3] - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicCompareAndSwap64 ptr old new_ mem) - // result: (CMPXCHGQlock ptr old new_ mem) + b := v.Block + // match: (Eq32F x y) + // result: (SETEQF (UCOMISS x y)) for { - ptr := v_0 - old := v_1 - new_ := v_2 - mem := v_3 - v.reset(OpAMD64CMPXCHGQlock) - v.AddArg4(ptr, old, new_, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicExchange32 ptr val mem) - // result: (XCHGL val ptr mem) + b := v.Block + // match: (Eq64 x y) + // result: (SETEQ (CMPQ x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGL) - v.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return 
true } } -func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicExchange64 ptr val mem) - // result: (XCHGQ val ptr mem) + b := v.Block + // match: (Eq64F x y) + // result: (SETEQF (UCOMISD x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGQ) - v.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEq8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicExchange8 ptr val mem) - // result: (XCHGB val ptr mem) + b := v.Block + // match: (Eq8 x y) + // result: (SETEQ (CMPB x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64XCHGB) - v.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool { +func rewriteValueAMD64_OpEqB(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad32 ptr mem) - // result: (MOVLatomicload ptr mem) + b := v.Block + // match: (EqB x y) + // result: (SETEQ (CMPB x y)) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVLatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool { +func rewriteValueAMD64_OpEqPtr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad64 ptr mem) - // result: (MOVQatomicload ptr mem) + b := v.Block + // match: (EqPtr x y) + // result: (SETEQ (CMPQ x y)) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVQatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64SETEQ) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool { +func rewriteValueAMD64_OpEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoad8 ptr mem) - // result: (MOVBatomicload ptr mem) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [0] x y)) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVBatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool { +func rewriteValueAMD64_OpEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicLoadPtr ptr mem) - // result: (MOVQatomicload ptr mem) + // match: (EqualFloat32x4 x y) + // result: (VCMPPS128 [0] x y) for { - ptr := v_0 - mem := v_1 - v.reset(OpAMD64MOVQatomicload) - v.AddArg2(ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpAtomicOr32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr32 ptr val mem) - // result: (ORLlock ptr val mem) + // match: (EqualFloat32x8 x 
y) + // result: (VCMPPS256 [0] x y) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ORLlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr32value ptr val mem) - // result: (LoweredAtomicOr32 ptr val mem) + // match: (EqualFloat64x2 x y) + // result: (VCMPPD128 [0] x y) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicOr32) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr64value ptr val mem) - // result: (LoweredAtomicOr64 ptr val mem) + // match: (EqualFloat64x4 x y) + // result: (VCMPPD256 [0] x y) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64LoweredAtomicOr64) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpAtomicOr8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AtomicOr8 ptr val mem) - // result: (ORBlock ptr val mem) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [0] x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpAMD64ORBlock) - v.AddArg3(ptr, val, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicStore32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (AtomicStore32 ptr val mem) - // result: (Select1 (XCHGL val ptr mem)) + // match: (EqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPEQW512 x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem)) - v0.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQW512, typ.Mask) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicStore64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (AtomicStore64 ptr val mem) - // result: (Select1 (XCHGQ val ptr mem)) + // match: (EqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPEQD512 x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem)) - v0.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQD512, typ.Mask) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicStore8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualInt64x8(v *Value) bool { v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (AtomicStore8 ptr val mem) - // result: (Select1 (XCHGB val ptr mem)) + // match: (EqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPEQQ512 x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem)) - v0.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQ512, typ.Mask) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (AtomicStorePtrNoWB ptr val mem) - // result: (Select1 (XCHGQ val ptr mem)) + // match: (EqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPEQB512 x y)) for { - ptr := v_0 - val := v_1 - mem := v_2 - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem)) - v0.AddArg3(val, ptr, mem) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQB512, typ.Mask) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAverageUint16x16(v *Value) bool { +func rewriteValueAMD64_OpEqualUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AverageUint16x16 x y) - // result: (VPAVGW256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPAVGW256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAverageUint16x32(v *Value) bool { +func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AverageUint16x32 x y) - // result: (VPAVGW512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPAVGW512) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAverageUint16x8(v *Value) bool { +func rewriteValueAMD64_OpEqualUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AverageUint16x8 x y) - // result: (VPAVGW128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPAVGW128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAverageUint8x16(v *Value) bool { +func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AverageUint8x16 x y) - // result: (VPAVGB128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPAVGB128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + 
v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAverageUint8x32(v *Value) bool { +func rewriteValueAMD64_OpEqualUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AverageUint8x32 x y) - // result: (VPAVGB256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPAVGB256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpAverageUint8x64(v *Value) bool { +func rewriteValueAMD64_OpEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (AverageUint8x64 x y) - // result: (VPAVGB512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [0] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPAVGB512) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpBitLen16(v *Value) bool { +func rewriteValueAMD64_OpEqualUint64x2(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (BitLen16 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) - for { - x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSRL) - v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32) - v1.AddArg(x) - v0.AddArg2(v1, v1) - v.AddArg(v0) - return true - } - // match: (BitLen16 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVWQZX x)))) + // match: (EqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [0] x y)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type) - v2.AddArg(x) - v1.AddArg(v2) - v0.AddArg(v1) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpBitLen32(v *Value) bool { +func rewriteValueAMD64_OpEqualUint64x4(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (BitLen32 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) + // match: (EqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [0] x y)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64) - v1.AuxInt = int32ToAuxInt(1) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) - v2.AddArg(x) - v1.AddArg2(v2, v2) - v0.AddArg(v1) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) v.AddArg(v0) return true } - // match: (BitLen32 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: 
(NEGQ (ADDQconst [-32] (LZCNTL x))) +} +func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [0] x y)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v1.AddArg(x) - v0.AddArg(v1) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpBitLen64(v *Value) bool { +func rewriteValueAMD64_OpEqualUint8x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (BitLen64 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) + // match: (EqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [0] x y)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(1) - v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t) - v1 := b.NewValue0(v.Pos, OpSelect0, t) - v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v2.AddArg(x) - v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) - v3.AuxInt = int64ToAuxInt(-1) - v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v4.AddArg(v2) - v0.AddArg3(v1, v3, v4) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) v.AddArg(v0) return true } - // match: (BitLen64 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-64] (LZCNTQ x))) +} +func rewriteValueAMD64_OpEqualUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [0] x y)) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-64) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64) - v1.AddArg(x) - v0.AddArg(v1) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpBitLen8(v *Value) bool { +func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (BitLen8 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) + // match: (EqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [0] x y)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSRL) - v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32) - v1.AddArg(x) - v0.AddArg2(v1, v1) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v0.AddArg2(x, y) v.AddArg(v0) return true } - // match: (BitLen8 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (NEGQ (ADDQconst [-32] (LZCNTL (MOVBQZX x)))) +} +func 
rewriteValueAMD64_OpFMA(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (FMA x y z) + // result: (VFMADD231SD z x y) for { - t := v.Type x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64NEGQ) - v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t) - v0.AuxInt = int32ToAuxInt(-32) - v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32) - v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type) - v2.AddArg(x) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) + y := v_1 + z := v_2 + v.reset(OpAMD64VFMADD231SD) + v.AddArg3(z, x, y) return true } - return false } -func rewriteValueAMD64_OpBswap16(v *Value) bool { +func rewriteValueAMD64_OpFloor(v *Value) bool { v_0 := v.Args[0] - // match: (Bswap16 x) - // result: (ROLWconst [8] x) + // match: (Floor x) + // result: (ROUNDSD [1] x) for { x := v_0 - v.reset(OpAMD64ROLWconst) - v.AuxInt = int8ToAuxInt(8) + v.reset(OpAMD64ROUNDSD) + v.AuxInt = int8ToAuxInt(1) v.AddArg(x) return true } } -func rewriteValueAMD64_OpCeil(v *Value) bool { +func rewriteValueAMD64_OpGetG(v *Value) bool { v_0 := v.Args[0] - // match: (Ceil x) - // result: (ROUNDSD [2] x) + // match: (GetG mem) + // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal + // result: (LoweredGetG mem) for { - x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(2) - v.AddArg(x) + mem := v_0 + if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { + break + } + v.reset(OpAMD64LoweredGetG) + v.AddArg(mem) return true } + return false } -func rewriteValueAMD64_OpCondSelect(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (CondSelect x y (SETEQ cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQEQ y x cond) + // match: (GreaterEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQEQ) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat32x4 x y) + // result: (VCMPPS128 [5] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQNE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETL cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLT y x cond) +} +func rewriteValueAMD64_OpGreaterEqualFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat32x8 x y) + // result: (VCMPPS256 [5] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETG cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGT y 
x cond) +} +func rewriteValueAMD64_OpGreaterEqualFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat64x2 x y) + // result: (VCMPPD128 [5] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterEqualFloat64x4 x y) + // result: (VCMPPD256 [5] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(5) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETA cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQHI y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQHI) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETB cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQCS y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQCS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETAE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQCC y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 
(VPCMPW128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQCC) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQLS y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQLS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQEQF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQEQF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNEF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQNEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGTF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGTF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: (is64BitInt(t) || isPtr(t)) - // result: (CMOVQGEF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: 
(GreaterEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is64BitInt(t) || isPtr(t)) { - break - } - v.reset(OpAMD64CMOVQGEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETEQ cond)) - // cond: is32BitInt(t) - // result: (CMOVLEQ y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLEQ) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: is32BitInt(t) - // result: (CMOVLNE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETL cond)) - // cond: is32BitInt(t) - // result: (CMOVLLT y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETG cond)) - // cond: is32BitInt(t) - // result: (CMOVLGT y x cond) +} +func rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: is32BitInt(t) - // result: (CMOVLLE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [5] x y)) 
for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: is32BitInt(t) - // result: (CMOVLGE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETA cond)) - // cond: is32BitInt(t) - // result: (CMOVLHI y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLHI) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETB cond)) - // cond: is32BitInt(t) - // result: (CMOVLCS y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLCS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETAE cond)) - // cond: is32BitInt(t) - // result: (CMOVLCC y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLCC) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: is32BitInt(t) - // result: (CMOVLLS y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - 
break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLLS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: is32BitInt(t) - // result: (CMOVLEQF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLEQF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: is32BitInt(t) - // result: (CMOVLNEF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGF cond)) - // cond: is32BitInt(t) - // result: (CMOVLGTF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGTF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: is32BitInt(t) - // result: (CMOVLGEF y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLGEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETEQ cond)) - // cond: is16BitInt(t) - // result: (CMOVWEQ y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQ { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - 
break - } - v.reset(OpAMD64CMOVWEQ) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNE cond)) - // cond: is16BitInt(t) - // result: (CMOVWNE y x cond) +} +func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [5] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETL cond)) - // cond: is16BitInt(t) - // result: (CMOVWLT y x cond) +} +func rewriteValueAMD64_OpGreaterFloat32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETL { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETG cond)) - // cond: is16BitInt(t) - // result: (CMOVWGT y x cond) +} +func rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat32x4 x y) + // result: (VCMPPS128 [6] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETG { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGT) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETLE cond)) - // cond: is16BitInt(t) - // result: (CMOVWLE y x cond) +} +func rewriteValueAMD64_OpGreaterFloat32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat32x8 x y) + // result: (VCMPPS256 [6] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETLE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETGE cond)) - // cond: is16BitInt(t) - // result: (CMOVWGE y x cond) +} +func rewriteValueAMD64_OpGreaterFloat64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat64x2 x y) + // result: (VCMPPD128 [6] x y) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGE) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETA cond)) - // cond: is16BitInt(t) - // result: (CMOVWHI y x cond) +} +func rewriteValueAMD64_OpGreaterFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (GreaterFloat64x4 x y) + // result: (VCMPPD256 [6] x y) for { - t := v.Type x := v_0 y := v_1 - if 
v_2.Op != OpAMD64SETA { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWHI) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(6) + v.AddArg2(x, y) return true } - // match: (CondSelect x y (SETB cond)) - // cond: is16BitInt(t) - // result: (CMOVWCS y x cond) +} +func rewriteValueAMD64_OpGreaterFloat64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETB { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWCS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETAE cond)) - // cond: is16BitInt(t) - // result: (CMOVWCC y x cond) +} +func rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPGTW512 x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETAE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWCC) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTW512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETBE cond)) - // cond: is16BitInt(t) - // result: (CMOVWLS y x cond) +} +func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPGTD512 x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETBE { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWLS) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTD512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETEQF cond)) - // cond: is16BitInt(t) - // result: (CMOVWEQF y x cond) +} +func rewriteValueAMD64_OpGreaterInt64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPGTQ128 x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETEQF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWEQF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ128, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETNEF cond)) - // cond: is16BitInt(t) - // result: (CMOVWNEF y x cond) +} +func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPGTQ512 x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETNEF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: 
(CondSelect x y (SETGF cond)) - // cond: is16BitInt(t) - // result: (CMOVWGTF y x cond) +} +func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPGTB512 x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGTF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTB512, typ.Mask) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y (SETGEF cond)) - // cond: is16BitInt(t) - // result: (CMOVWGEF y x cond) +} +func rewriteValueAMD64_OpGreaterUint16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - if v_2.Op != OpAMD64SETGEF { - break - } - cond := v_2.Args[0] - if !(is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWGEF) - v.AddArg3(y, x, cond) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 1 - // result: (CondSelect x y (MOVBQZX check)) +} +func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 1) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 2 - // result: (CondSelect x y (MOVWQZX check)) +} +func rewriteValueAMD64_OpGreaterUint16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 2) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, y, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 4 - // result: (CondSelect x y (MOVLQZX check)) +} +func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 4) { - break - } - v.reset(OpCondSelect) - v.Type = t - v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64) - v0.AddArg(check) - v.AddArg3(x, 
y, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) - // result: (CMOVQNE y x (CMPQconst [0] check)) +} +func rewriteValueAMD64_OpGreaterUint32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) { - break - } - v.reset(OpAMD64CMOVQNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) - // result: (CMOVLNE y x (CMPQconst [0] check)) +} +func rewriteValueAMD64_OpGreaterUint32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) { - break - } - v.reset(OpAMD64CMOVLNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (CondSelect x y check) - // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) - // result: (CMOVWNE y x (CMPQconst [0] check)) +} +func rewriteValueAMD64_OpGreaterUint64x2(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [6] x y)) for { - t := v.Type x := v_0 y := v_1 - check := v_2 - if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) { - break - } - v.reset(OpAMD64CMOVWNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v0.AddArg(check) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpConst16(v *Value) bool { - // match: (Const16 [c]) - // result: (MOVLconst [int32(c)]) +func rewriteValueAMD64_OpGreaterUint64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [6] x y)) for { - c := auxIntToInt16(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(int32(c)) - return true - } -} -func rewriteValueAMD64_OpConst8(v *Value) bool { - // match: (Const8 [c]) - // result: (MOVLconst [int32(c)]) - for { - c := auxIntToInt8(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(int32(c)) - return true - } -} -func rewriteValueAMD64_OpConstBool(v *Value) bool { - // 
match: (ConstBool [c]) - // result: (MOVLconst [b2i32(c)]) - for { - c := auxIntToBool(v.AuxInt) - v.reset(OpAMD64MOVLconst) - v.AuxInt = int32ToAuxInt(b2i32(c)) - return true - } -} -func rewriteValueAMD64_OpConstNil(v *Value) bool { - // match: (ConstNil ) - // result: (MOVQconst [0]) - for { - v.reset(OpAMD64MOVQconst) - v.AuxInt = int64ToAuxInt(0) + x := v_0 + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpCtz16(v *Value) bool { +func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Ctz16 x) - // result: (BSFL (ORLconst [1<<16] x)) + // match: (GreaterUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [6] x y)) for { x := v_0 - v.reset(OpAMD64BSFL) - v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1 << 16) - v0.AddArg(x) + y := v_1 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool { +func rewriteValueAMD64_OpGreaterUint8x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Ctz16NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz16NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [6] x y)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpCtz32(v *Value) bool { +func rewriteValueAMD64_OpGreaterUint8x32(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Ctz32 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz32 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSFQ (BTSQconst [32] x))) + // match: (GreaterUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [6] x y)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64) - v1.AuxInt = int8ToAuxInt(32) - v1.AddArg(x) - v0.AddArg(v1) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool { +func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Ctz32NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) - for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) - return true - } - // match: (Ctz32NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL 
x) + b := v.Block + typ := &b.Func.Config.Types + // match: (GreaterUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [6] x y)) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpCtz64(v *Value) bool { - v_0 := v.Args[0] +func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { b := v.Block typ := &b.Func.Config.Types - // match: (Ctz64 x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTQ x) + // match: (HasCPUFeature {s}) + // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) for { - x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTQ) - v.AddArg(x) + s := auxToSym(v.Aux) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v0.AuxInt = int32ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) + v1.Aux = symToAux(s) + v0.AddArg(v1) + v.AddArg(v0) return true } - // match: (Ctz64 x) - // cond: buildcfg.GOAMD64 < 3 - // result: (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) +} +func rewriteValueAMD64_OpIsInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (IsInBounds idx len) + // result: (SETB (CMPQ idx len)) for { - t := v.Type - x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64CMOVQEQ) - v0 := b.NewValue0(v.Pos, OpSelect0, t) - v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t) - v2.AuxInt = int64ToAuxInt(64) - v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) - v3.AddArg(v1) - v.AddArg3(v0, v2, v3) + idx := v_0 + len := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(idx, len) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool { +func rewriteValueAMD64_OpIsNanFloat32x16(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Ctz64NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTQ x) + // match: (IsNanFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [3] x y)) for { x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTQ) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v0.AddArg2(x, y) + v.AddArg(v0) return true } - // match: (Ctz64NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (Select0 (BSFQ x)) +} +func rewriteValueAMD64_OpIsNanFloat32x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat32x4 x y) + // result: (VCMPPS128 [3] x y) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags)) - v0.AddArg(x) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpCtz8(v *Value) bool { +func rewriteValueAMD64_OpIsNanFloat32x8(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (Ctz8 x) - // result: (BSFL (ORLconst [1<<8 ] x)) + // match: (IsNanFloat32x8 x y) + 
// result: (VCMPPS256 [3] x y) for { x := v_0 - v.reset(OpAMD64BSFL) - v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32) - v0.AuxInt = int32ToAuxInt(1 << 8) - v0.AddArg(x) - v.AddArg(v0) + y := v_1 + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool { +func rewriteValueAMD64_OpIsNanFloat64x2(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Ctz8NonZero x) - // cond: buildcfg.GOAMD64 >= 3 - // result: (TZCNTL x) + // match: (IsNanFloat64x2 x y) + // result: (VCMPPD128 [3] x y) for { x := v_0 - if !(buildcfg.GOAMD64 >= 3) { - break - } - v.reset(OpAMD64TZCNTL) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) return true } - // match: (Ctz8NonZero x) - // cond: buildcfg.GOAMD64 < 3 - // result: (BSFL x) +} +func rewriteValueAMD64_OpIsNanFloat64x4(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsNanFloat64x4 x y) + // result: (VCMPPD256 [3] x y) for { x := v_0 - if !(buildcfg.GOAMD64 < 3) { - break - } - v.reset(OpAMD64BSFL) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(3) + v.AddArg2(x, y) return true } - return false } -func rewriteValueAMD64_OpDiv16(v *Value) bool { +func rewriteValueAMD64_OpIsNanFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (Div16 [a] x y) - // result: (Select0 (DIVW [a] x y)) + // match: (IsNanFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [3] x y)) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v0.AuxInt = boolToAuxInt(a) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv16u(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpIsNonNil(v *Value) bool { v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div16u x y) - // result: (Select0 (DIVWU x y)) + // match: (IsNonNil p) + // result: (SETNE (TESTQ p p)) for { - x := v_0 - y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v0.AddArg2(x, y) + p := v_0 + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) + v0.AddArg2(p, p) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv32(v *Value) bool { +func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div32 [a] x y) - // result: (Select0 (DIVL [a] x y)) + // match: (IsSliceInBounds idx len) + // result: (SETBE (CMPQ idx len)) for { - a := auxIntToBool(v.AuxInt) - x := v_0 - y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) - v0.AuxInt = boolToAuxInt(a) - v0.AddArg2(x, y) + idx := v_0 + len := v_1 + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(idx, len) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv32u(v *Value) bool { +func rewriteValueAMD64_OpLeq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div32u x y) - // result: (Select0 (DIVLU x y)) + // match: (Leq16 x y) + // result: (SETLE (CMPW x y)) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := 
b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv64(v *Value) bool { +func rewriteValueAMD64_OpLeq16U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div64 [a] x y) - // result: (Select0 (DIVQ [a] x y)) + // match: (Leq16U x y) + // result: (SETBE (CMPW x y)) for { - a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) - v0.AuxInt = boolToAuxInt(a) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv64u(v *Value) bool { +func rewriteValueAMD64_OpLeq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div64u x y) - // result: (Select0 (DIVQU x y)) + // match: (Leq32 x y) + // result: (SETLE (CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv8(v *Value) bool { +func rewriteValueAMD64_OpLeq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div8 x y) - // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) + // match: (Leq32F x y) + // result: (SETGEF (UCOMISS y x)) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) - v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) - v2.AddArg(y) - v0.AddArg2(v1, v2) + v.reset(OpAMD64SETGEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(y, x) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDiv8u(v *Value) bool { +func rewriteValueAMD64_OpLeq32U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Div8u x y) - // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) + // match: (Leq32U x y) + // result: (SETBE (CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) - v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v1.AddArg(x) - v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) - v2.AddArg(y) - v0.AddArg2(v1, v2) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDivFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLeq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DivFloat32x16 x y) - // result: (VDIVPS512 y x) + b := v.Block + // match: (Leq64 x y) + // result: (SETLE (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDIVPS512) - v.AddArg2(y, x) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDivFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpLeq64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DivFloat32x4 x y) - // result: (VDIVPS128 y x) + b := v.Block + // match: (Leq64F x y) + // 
result: (SETGEF (UCOMISD y x)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDIVPS128) - v.AddArg2(y, x) + v.reset(OpAMD64SETGEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDivFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpLeq64U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DivFloat32x8 x y) - // result: (VDIVPS256 y x) + b := v.Block + // match: (Leq64U x y) + // result: (SETBE (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDIVPS256) - v.AddArg2(y, x) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDivFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLeq8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DivFloat64x2 x y) - // result: (VDIVPD128 y x) + b := v.Block + // match: (Leq8 x y) + // result: (SETLE (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDIVPD128) - v.AddArg2(y, x) + v.reset(OpAMD64SETLE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDivFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLeq8U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DivFloat64x4 x y) - // result: (VDIVPD256 y x) + b := v.Block + // match: (Leq8U x y) + // result: (SETBE (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDIVPD256) - v.AddArg2(y, x) + v.reset(OpAMD64SETBE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpDivFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLess16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (DivFloat64x8 x y) - // result: (VDIVPD512 y x) + b := v.Block + // match: (Less16 x y) + // result: (SETL (CMPW x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VDIVPD512) - v.AddArg2(y, x) + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEq16(v *Value) bool { +func rewriteValueAMD64_OpLess16U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Eq16 x y) - // result: (SETEQ (CMPW x y)) + // match: (Less16U x y) + // result: (SETB (CMPW x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) + v.reset(OpAMD64SETB) v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEq32(v *Value) bool { +func rewriteValueAMD64_OpLess32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Eq32 x y) - // result: (SETEQ (CMPL x y)) + // match: (Less32 x y) + // result: (SETL (CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) + v.reset(OpAMD64SETL) v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEq32F(v *Value) bool { +func rewriteValueAMD64_OpLess32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Eq32F x y) - // result: (SETEQF (UCOMISS x y)) + // match: (Less32F x y) + // result: (SETGF (UCOMISS y x)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQF) + v.reset(OpAMD64SETGF) v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(x, y) + v0.AddArg2(y, x) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEq64(v *Value) bool { +func rewriteValueAMD64_OpLess32U(v *Value) bool { v_1 := 
v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Eq64 x y) - // result: (SETEQ (CMPQ x y)) + // match: (Less32U x y) + // result: (SETB (CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEq64F(v *Value) bool { +func rewriteValueAMD64_OpLess64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Eq64F x y) - // result: (SETEQF (UCOMISD x y)) + // match: (Less64 x y) + // result: (SETL (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v.reset(OpAMD64SETL) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEq8(v *Value) bool { +func rewriteValueAMD64_OpLess64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Eq8 x y) - // result: (SETEQ (CMPB x y)) + // match: (Less64F x y) + // result: (SETGF (UCOMISD y x)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v.reset(OpAMD64SETGF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpLess64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64U x y) + // result: (SETB (CMPQ x y)) + for { + x := v_0 + y := v_1 + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqB(v *Value) bool { +func rewriteValueAMD64_OpLess8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (EqB x y) - // result: (SETEQ (CMPB x y)) + // match: (Less8 x y) + // result: (SETL (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) + v.reset(OpAMD64SETL) v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqPtr(v *Value) bool { +func rewriteValueAMD64_OpLess8U(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (EqPtr x y) - // result: (SETEQ (CMPQ x y)) + // match: (Less8U x y) + // result: (SETB (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETEQ) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v.reset(OpAMD64SETB) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [0] y x)) + // match: (LessEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualFloat32x4 x y) - // result: (VCMPPS128 [0] y x) + // match: (LessEqualFloat32x4 x y) + // result: (VCMPPS128 [2] x y) for { x := v_0 y := v_1 
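The scalar float rules above (Leq32F, Leq64F, Less32F, Less64F) all swap the operands and use the float-aware SETGEF/SETGF conditions: UCOMISS and UCOMISD report an unordered result by setting ZF, PF and CF together, so an above-style condition comes out false whenever either input is NaN, which matches Go's comparison semantics. A minimal scalar model of one rule (the helper name is ours, not the compiler's):

package simdnotes

// less32FModel mirrors (Less32F x y) -> (SETGF (UCOMISS y x)).
// A NaN operand makes UCOMISS report "unordered" (ZF, PF and CF all
// set), and the above-style condition behind SETGF then reads false.
func less32FModel(x, y float32) bool {
	if x != x || y != y { // unordered: comparisons with NaN are false
		return false
	}
	return x < y // ordered: x < y exactly when y compares "above" x
}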
v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualFloat32x8 x y) - // result: (VCMPPS256 [0] y x) + // match: (LessEqualFloat32x8 x y) + // result: (VCMPPS256 [2] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualFloat64x2 x y) - // result: (VCMPPD128 [0] y x) + // match: (LessEqualFloat64x2 x y) + // result: (VCMPPD128 [2] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualFloat64x4 x y) - // result: (VCMPPD256 [0] y x) + // match: (LessEqualFloat64x4 x y) + // result: (VCMPPD256 [2] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(0) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(2) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [0] y x)) + // match: (LessEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt16x16 x y) - // result: (VPCMPEQW256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQW256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPEQW512 y x)) + // match: (LessEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQW512, typ.Mask) - v0.AddArg2(y, x) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt16x8 x y) - // result: (VPCMPEQW128 y x) + b := v.Block + typ := 
&b.Func.Config.Types + // match: (LessEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQW128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [0] y x)) + // match: (LessEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt32x4 x y) - // result: (VPCMPEQD128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQD128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt32x8 x y) - // result: (VPCMPEQD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQD256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt64x2 x y) - // result: (VPCMPEQQ128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQQ128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt64x4 x y) - // result: (VPCMPEQQ256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQQ256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPEQQ512 y 
x)) + // match: (LessEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQ512, typ.Mask) - v0.AddArg2(y, x) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt8x16 x y) - // result: (VPCMPEQB128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQB128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (EqualInt8x32 x y) - // result: (VPCMPEQB256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (LessEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [2] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPEQB256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpLessEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [0] y x)) + // match: (LessEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [0] y x)) + // match: (LessEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [0] y x)) + // match: (LessEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: 
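AVX and AVX2 only provide packed equals (VPCMPEQ*) and signed greater-than (VPCMPGT*), each with a vector result, which is why the removed Equal and Greater rules could use plain two-operand compares while the LessEqual rules above need the EVEX VPCMP{B,W,D,Q} forms: those take a predicate immediate and write one bit per lane into a mask register, and VPMOVMToVec* (presumably wrapping the VPMOVM2{B,W,D,Q} mask-to-vector instructions) widens that mask back into full lanes. A lane-level sketch of the pair, under our own helper names:

package simdnotes

// vpcmpwLE models VPCMPW with predicate 2 (less-equal) over 16 lanes
// of int16: bit i of the mask is set when x[i] <= y[i].
func vpcmpwLE(x, y [16]int16) (k uint16) {
	for i := range x {
		if x[i] <= y[i] {
			k |= 1 << i
		}
	}
	return k
}

// vpmovm2w models VPMOVM2W: each set mask bit becomes an all-ones
// lane and each clear bit a zero lane.
func vpmovm2w(k uint16) (v [16]int16) {
	for i := range v {
		if k&(1<<i) != 0 {
			v[i] = -1
		}
	}
	return v
}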
(EqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [0] y x)) + // match: (LessEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [0] y x)) + // match: (LessEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [0] y x)) + // match: (LessEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [0] y x)) + // match: (LessEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [0] y x)) + // match: (LessEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [0] y x)) + // match: (LessEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 
:= v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [0] y x)) + // match: (LessEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [0] y x)) + // match: (LessEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [0] y x)) + // match: (LessEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpLessEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (EqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [0] y x)) + // match: (LessEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [2] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(2) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpFMA(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (FMA x y z) - // result: (VFMADD231SD z x y) - for { - x := v_0 - y := v_1 - z := v_2 - v.reset(OpAMD64VFMADD231SD) - v.AddArg3(z, x, y) - return true - } -} -func rewriteValueAMD64_OpFloor(v *Value) bool { - v_0 := v.Args[0] - // match: (Floor x) - // result: (ROUNDSD [1] x) - for { - x := v_0 - v.reset(OpAMD64ROUNDSD) - v.AuxInt = int8ToAuxInt(1) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpGetG(v *Value) bool { - v_0 := v.Args[0] - // match: (GetG mem) - // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal - // result: (LoweredGetG mem) - for { - mem := v_0 - if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) { - break - } - v.reset(OpAMD64LoweredGetG) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpGreaterEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLessFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [5] y x)) + // match: (LessFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [1] x y)) for { x := v_0 y := v_1 
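The 8-bit immediates threaded through these rules are the standard x86 compare predicates, shared by VCMPPS/VCMPPD and the EVEX integer compares. As the hunks show, the regenerated rules pair [1] and [2] with the operands in source order (x, y), where the removed forms paired [0], [5] and [6] with the operands swapped (y, x). The predicate values used in this file (constant names ours):

package simdnotes

// Compare-predicate immediates as they appear in the rules here.
const (
	predEQ    = 0 // Equal*         (removed rules: [0] with y, x)
	predLT    = 1 // Less*          (new rules: [1] with x, y)
	predLE    = 2 // LessEqual*     (new rules: [2] with x, y)
	predUNORD = 3 // IsNan*: true when either lane is NaN
	predNLT   = 5 // not-less-than:  removed GreaterEqual* ([5] with y, x)
	predNLE   = 6 // not-less-equal: removed Greater* ([6] with y, x)
)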
v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpLessFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterEqualFloat32x4 x y) - // result: (VCMPPS128 [5] y x) + // match: (LessFloat32x4 x y) + // result: (VCMPPS128 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(5) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpLessFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterEqualFloat32x8 x y) - // result: (VCMPPS256 [5] y x) + // match: (LessFloat32x8 x y) + // result: (VCMPPS256 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(5) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLessFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterEqualFloat64x2 x y) - // result: (VCMPPD128 [5] y x) + // match: (LessFloat64x2 x y) + // result: (VCMPPD128 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(5) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpLessFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterEqualFloat64x4 x y) - // result: (VCMPPD256 [5] y x) + // match: (LessFloat64x4 x y) + // result: (VCMPPD256 [1] x y) for { x := v_0 y := v_1 v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(5) - v.AddArg2(y, x) + v.AuxInt = int8ToAuxInt(1) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpGreaterEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLessFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [5] y x)) + // match: (LessFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpLessInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [5] y x)) + // match: (LessInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [5] y x)) + // match: (LessInt16x32 
x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpLessInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [5] y x)) + // match: (LessInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpLessInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [5] y x)) + // match: (LessInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpLessInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [5] y x)) + // match: (LessInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpLessInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [5] y x)) + // match: (LessInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpLessInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [5] y x)) + // match: (LessInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpLessInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x4 x y) - // result: 
(VPMOVMToVec64x4 (VPCMPQ256 [5] y x)) + // match: (LessInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpLessInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [5] y x)) + // match: (LessInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpLessInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [5] y x)) + // match: (LessInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpLessInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [5] y x)) + // match: (LessInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpLessInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [5] y x)) + // match: (LessInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpLessUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [5] y x)) + // match: (LessUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpLessUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := 
&b.Func.Config.Types - // match: (GreaterEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [5] y x)) + // match: (LessUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpLessUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [5] y x)) + // match: (LessUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) - v.AddArg(v0) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpLessUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [5] y x)) + // match: (LessUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpLessUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [5] y x)) + // match: (LessUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpLessUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [5] y x)) + // match: (LessUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpLessUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [5] y x)) + // match: (LessUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint64x4(v *Value) bool { 
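Unsigned vector compares have no AVX or AVX2 encoding at all (VPCMPGT* is signed-only), so every LessUint* rule, even at 128 and 256 bits, routes through the EVEX VPCMPU{B,W,D,Q} forms and a mask register. The lane model is the same as the signed sketch earlier, just with unsigned element types:

package simdnotes

// vpcmpubLT models VPCMPUB with predicate 1 (less-than) over 16 lanes
// of uint8, as in the LessUint8x16 rule: bit i is set when x[i] < y[i].
func vpcmpubLT(x, y [16]uint8) (k uint16) {
	for i := range x {
		if x[i] < y[i] {
			k |= 1 << i
		}
	}
	return k
}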
+func rewriteValueAMD64_OpLessUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [5] y x)) + // match: (LessUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpLessUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [5] y x)) + // match: (LessUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpLessUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [5] y x)) + // match: (LessUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpLessUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [5] y x)) + // match: (LessUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpLessUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [5] y x)) + // match: (LessUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [1] x y)) for { x := v_0 y := v_1 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v0.AddArg2(y, x) + v0.AuxInt = int8ToAuxInt(1) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpGreaterFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpLoad(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [6] y x)) + // match: (Load ptr mem) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (MOVQload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + t := v.Type + ptr := v_0 + mem 
:= v_1 + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpAMD64MOVQload) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterFloat32x4 x y) - // result: (VCMPPS128 [6] y x) + // match: (Load ptr mem) + // cond: is32BitInt(t) + // result: (MOVLload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(6) - v.AddArg2(y, x) + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t)) { + break + } + v.reset(OpAMD64MOVLload) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterFloat32x8 x y) - // result: (VCMPPS256 [6] y x) + // match: (Load ptr mem) + // cond: is16BitInt(t) + // result: (MOVWload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(6) - v.AddArg2(y, x) + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t)) { + break + } + v.reset(OpAMD64MOVWload) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterFloat64x2 x y) - // result: (VCMPPD128 [6] y x) + // match: (Load ptr mem) + // cond: (t.IsBoolean() || is8BitInt(t)) + // result: (MOVBload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(6) - v.AddArg2(y, x) + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean() || is8BitInt(t)) { + break + } + v.reset(OpAMD64MOVBload) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterFloat64x4 x y) - // result: (VCMPPD256 [6] y x) + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (MOVSSload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(6) - v.AddArg2(y, x) + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + v.reset(OpAMD64MOVSSload) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [6] y x)) + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (MOVSDload ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(OpAMD64MOVSDload) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterInt16x16 x y) - // result: (VPCMPGTW256 y x) + // match: (Load ptr mem) + // cond: t.Size() == 16 + // result: (VMOVDQUload128 ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPCMPGTW256) - v.AddArg2(y, x) + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 16) { + break + } + v.reset(OpAMD64VMOVDQUload128) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPGTW512 y x)) + // match: (Load ptr mem) + // cond: t.Size() == 32 + // result: 
(VMOVDQUload256 ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTW512, typ.Mask) - v0.AddArg2(y, x) - v.AddArg(v0) + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 32) { + break + } + v.reset(OpAMD64VMOVDQUload256) + v.AddArg2(ptr, mem) return true } -} -func rewriteValueAMD64_OpGreaterInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterInt16x8 x y) - // result: (VPCMPGTW128 y x) + // match: (Load ptr mem) + // cond: t.Size() == 64 + // result: (VMOVDQUload512 ptr mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPCMPGTW128) - v.AddArg2(y, x) + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.Size() == 64) { + break + } + v.reset(OpAMD64VMOVDQUload512) + v.AddArg2(ptr, mem) return true } + return false } -func rewriteValueAMD64_OpGreaterInt32x16(v *Value) bool { +func rewriteValueAMD64_OpLocalAddr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (GreaterInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [6] y x)) + // match: (LocalAddr {sym} base mem) + // cond: t.Elem().HasPointers() + // result: (LEAQ {sym} (SPanchored base mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + mem := v_1 + if !(t.Elem().HasPointers()) { + break + } + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr) + v0.AddArg2(base, mem) v.AddArg(v0) return true } + // match: (LocalAddr {sym} base _) + // cond: !t.Elem().HasPointers() + // result: (LEAQ {sym} base) + for { + t := v.Type + sym := auxToSym(v.Aux) + base := v_0 + if !(!t.Elem().HasPointers()) { + break + } + v.reset(OpAMD64LEAQ) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } + return false } -func rewriteValueAMD64_OpGreaterInt32x4(v *Value) bool { +func rewriteValueAMD64_OpLsh16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterInt32x4 x y) - // result: (VPCMPGTD128 y x) + b := v.Block + // match: (Lsh16x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPCMPGTD128) - v.AddArg2(y, x) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterInt32x8 x y) - // result: (VPCMPGTD256 y x) + // match: (Lsh16x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPGTD256) - v.AddArg2(y, x) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterInt64x2(v *Value) bool { +func rewriteValueAMD64_OpLsh16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPGTQ128 y x)) + // match: (Lsh16x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := 
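The Load rules above dispatch purely on the value's type: integers and pointers pick a general-purpose move by width, booleans ride with the 1-byte integers, floats pick the SSE scalar moves, and the 16-, 32- and 64-byte SIMD types pick the unaligned vector loads. A compact sketch of the same dispatch (an illustration, not the compiler's code):

package simdnotes

// loadOp names the AMD64 load selected for a type of the given size;
// the float cases must be tested before the same-sized integer cases,
// mirroring the rule order above.
func loadOp(sizeBytes int64, isFloat bool) string {
	switch {
	case isFloat && sizeBytes == 4:
		return "MOVSSload"
	case isFloat && sizeBytes == 8:
		return "MOVSDload"
	case sizeBytes == 1:
		return "MOVBload" // also booleans
	case sizeBytes == 2:
		return "MOVWload"
	case sizeBytes == 4:
		return "MOVLload"
	case sizeBytes == 8:
		return "MOVQload" // also pointers
	case sizeBytes == 16:
		return "VMOVDQUload128"
	case sizeBytes == 32:
		return "VMOVDQUload256"
	case sizeBytes == 64:
		return "VMOVDQUload512"
	}
	return ""
}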
v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ128, typ.Mask) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterInt64x4 x y) - // result: (VPCMPGTQ256 y x) + // match: (Lsh16x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPGTQ256) - v.AddArg2(y, x) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterInt64x8(v *Value) bool { +func rewriteValueAMD64_OpLsh16x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPGTQ512 y x)) + // match: (Lsh16x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQ512, typ.Mask) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (GreaterInt8x16 x y) - // result: (VPCMPGTB128 y x) + // match: (Lsh16x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPCMPGTB128) - v.AddArg2(y, x) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterInt8x32(v *Value) bool { +func rewriteValueAMD64_OpLsh16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (GreaterInt8x32 x y) - // result: (VPCMPGTB256 y x) + b := v.Block + // match: (Lsh16x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPCMPGTB256) - v.AddArg2(y, x) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [6] y x)) + // match: (Lsh16x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + 
v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterUint16x16(v *Value) bool { +func rewriteValueAMD64_OpLsh32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [6] y x)) + // match: (Lsh32x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [6] y x)) + // match: (Lsh32x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterUint16x8(v *Value) bool { +func rewriteValueAMD64_OpLsh32x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [6] y x)) + // match: (Lsh32x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpGreaterUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [6] y x)) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [6] y x)) + // match: (Lsh32x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func 
rewriteValueAMD64_OpGreaterUint32x8(v *Value) bool { +func rewriteValueAMD64_OpLsh32x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [6] y x)) + // match: (Lsh32x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [6] y x)) + // match: (Lsh32x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterUint64x4(v *Value) bool { +func rewriteValueAMD64_OpLsh32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [6] y x)) + // match: (Lsh32x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [6] y x)) + // match: (Lsh32x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterUint8x16(v *Value) bool { +func rewriteValueAMD64_OpLsh64x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [6] y x)) + // match: (Lsh64x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPWconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpGreaterUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [6] y x)) + // match: (Lsh64x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpGreaterUint8x64(v *Value) bool { +func rewriteValueAMD64_OpLsh64x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (GreaterUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [6] y x)) + // match: (Lsh64x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPLconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool { - b := v.Block - typ := &b.Func.Config.Types - // match: (HasCPUFeature {s}) - // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) + // match: (Lsh64x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { - s := auxToSym(v.Aux) - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v0.AuxInt = int32ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64) - v1.Aux = symToAux(s) - v0.AddArg(v1) - v.AddArg(v0) + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpIsInBounds(v *Value) bool { +func rewriteValueAMD64_OpLsh64x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (IsInBounds idx len) - // result: (SETB (CMPQ idx len)) + // match: (Lsh64x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) for { - idx := v_0 - len := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(idx, len) - v.AddArg(v0) + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpIsNanFloat32x16(v 
*Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (IsNanFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [3] y x)) + // match: (Lsh64x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpIsNanFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpLsh64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (IsNanFloat32x4 x y) - // result: (VCMPPS128 [3] y x) + b := v.Block + // match: (Lsh64x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(y, x) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(64) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpIsNanFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (IsNanFloat32x8 x y) - // result: (VCMPPS256 [3] y x) + // match: (Lsh64x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLQ x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(y, x) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLQ) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpIsNanFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpLsh8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (IsNanFloat64x2 x y) - // result: (VCMPPD128 [3] y x) + b := v.Block + // match: (Lsh8x16 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(y, x) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) + v2.AuxInt = int16ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpIsNanFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (IsNanFloat64x4 x y) - // result: (VCMPPD256 [3] y x) + // match: (Lsh8x16 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(3) - v.AddArg2(y, x) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpIsNanFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpLsh8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (IsNanFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [3] y x)) + // match: (Lsh8x32 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, 
OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v0.AddArg2(y, x) - v.AddArg(v0) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpIsNonNil(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - // match: (IsNonNil p) - // result: (SETNE (TESTQ p p)) + // match: (Lsh8x32 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { - p := v_0 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags) - v0.AddArg2(p, p) - v.AddArg(v0) + x := v_0 + y := v_1 + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { +func rewriteValueAMD64_OpLsh8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (IsSliceInBounds idx len) - // result: (SETBE (CMPQ idx len)) + // match: (Lsh8x64 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) for { - idx := v_0 - len := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(idx, len) - v.AddArg(v0) + t := v.Type + x := v_0 + y := v_1 + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) + v2.AuxInt = int32ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpLeq16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Leq16 x y) - // result: (SETLE (CMPW x y)) + // match: (Lsh8x64 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true } + return false } -func rewriteValueAMD64_OpLeq16U(v *Value) bool { +func rewriteValueAMD64_OpLsh8x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq16U x y) - // result: (SETBE (CMPW x y)) + // match: (Lsh8x8 x y) + // cond: !shiftIsBounded(v) + // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + if !(!shiftIsBounded(v)) { + break + } + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) v0.AddArg2(x, y) - v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) + v2.AuxInt = int8ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } -} -func rewriteValueAMD64_OpLeq32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Leq32 x y) - // result: (SETLE (CMPL x y)) + // match: (Lsh8x8 x y) + // cond: shiftIsBounded(v) + // result: (SHLL x y) for { x := v_0 y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + if !(shiftIsBounded(v)) { + break + } + v.reset(OpAMD64SHLL) + v.AddArg2(x, y) return true 
} + return false } -func rewriteValueAMD64_OpLeq32F(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq32F x y) - // result: (SETGEF (UCOMISS y x)) + // match: (MaskedAbsoluteInt16x16 x mask) + // result: (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETGEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq32U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq32U x y) - // result: (SETBE (CMPL x y)) + // match: (MaskedAbsoluteInt16x32 x mask) + // result: (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq64(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq64 x y) - // result: (SETLE (CMPQ x y)) + // match: (MaskedAbsoluteInt16x8 x mask) + // result: (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETLE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq64F(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq64F x y) - // result: (SETGEF (UCOMISD y x)) + // match: (MaskedAbsoluteInt32x16 x mask) + // result: (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETGEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq64U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq64U x y) - // result: (SETBE (CMPQ x y)) + // match: (MaskedAbsoluteInt32x4 x mask) + // result: (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq8 x y) - // result: (SETLE (CMPB x y)) + // match: (MaskedAbsoluteInt32x8 x mask) + // result: (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETLE) - v0 := 
b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLeq8U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Leq8U x y) - // result: (SETBE (CMPB x y)) + // match: (MaskedAbsoluteInt64x2 x mask) + // result: (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETBE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less16 x y) - // result: (SETL (CMPW x y)) + // match: (MaskedAbsoluteInt64x4 x mask) + // result: (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess16U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less16U x y) - // result: (SETB (CMPW x y)) + // match: (MaskedAbsoluteInt64x8 x mask) + // result: (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32 x y) - // result: (SETL (CMPL x y)) + // match: (MaskedAbsoluteInt8x16 x mask) + // result: (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess32F(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less32F x y) - // result: (SETGF (UCOMISS y x)) + // match: (MaskedAbsoluteInt8x32 x mask) + // result: (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETGF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess32U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAbsoluteInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - 
// match: (Less32U x y) - // result: (SETB (CMPL x y)) + // match: (MaskedAbsoluteInt8x64 x mask) + // result: (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_1 + v.reset(OpAMD64VPABSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpLess64(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64 x y) - // result: (SETL (CMPQ x y)) + // match: (MaskedAddFloat32x16 x y mask) + // result: (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VADDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess64F(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64F x y) - // result: (SETGF (UCOMISD y x)) + // match: (MaskedAddFloat32x4 x y mask) + // result: (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETGF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VADDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess64U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less64U x y) - // result: (SETB (CMPQ x y)) + // match: (MaskedAddFloat32x8 x y mask) + // result: (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VADDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less8 x y) - // result: (SETL (CMPB x y)) + // match: (MaskedAddFloat64x2 x y mask) + // result: (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETL) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VADDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLess8U(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Less8U x y) - // result: (SETB (CMPB x y)) + // match: (MaskedAddFloat64x4 x y mask) + // result: (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + 
v.reset(OpAMD64VADDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [2] y x)) + // match: (MaskedAddFloat64x8 x y mask) + // result: (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VADDPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat32x4 x y) - // result: (VCMPPS128 [2] y x) + b := v.Block + // match: (MaskedAddInt16x16 x y mask) + // result: (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPADDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat32x8 x y) - // result: (VCMPPS256 [2] y x) + b := v.Block + // match: (MaskedAddInt16x32 x y mask) + // result: (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPADDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat64x2 x y) - // result: (VCMPPD128 [2] y x) + b := v.Block + // match: (MaskedAddInt16x8 x y mask) + // result: (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPADDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessEqualFloat64x4 x y) - // result: (VCMPPD256 [2] y x) + b := v.Block + // match: (MaskedAddInt32x16 x y mask) + // result: (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(2) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPADDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt32x4(v *Value) 
bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [2] y x)) + // match: (MaskedAddInt32x4 x y mask) + // result: (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [2] y x)) + // match: (MaskedAddInt32x8 x y mask) + // result: (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [2] y x)) + // match: (MaskedAddInt64x2 x y mask) + // result: (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [2] y x)) + // match: (MaskedAddInt64x4 x y mask) + // result: (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [2] y x)) + // match: (MaskedAddInt64x8 x y mask) + // result: (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDQMasked512) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [2] y x)) + // match: (MaskedAddInt8x16 x y mask) + // result: (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [2] y x)) + // match: (MaskedAddInt8x32 x y mask) + // result: (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddInt8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [2] y x)) + // match: (MaskedAddInt8x64 x y mask) + // result: (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [2] y x)) + // match: (MaskedAddUint16x16 x y mask) + // result: (VPADDWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [2] y x)) + // match: (MaskedAddUint16x32 x y mask) + // result: (VPADDWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - 
v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [2] y x)) + // match: (MaskedAddUint16x8 x y mask) + // result: (VPADDWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [2] y x)) + // match: (MaskedAddUint32x16 x y mask) + // result: (VPADDDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [2] y x)) + // match: (MaskedAddUint32x4 x y mask) + // result: (VPADDDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [2] y x)) + // match: (MaskedAddUint32x8 x y mask) + // result: (VPADDDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types 
- // match: (LessEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [2] y x)) + // match: (MaskedAddUint64x2 x y mask) + // result: (VPADDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [2] y x)) + // match: (MaskedAddUint64x4 x y mask) + // result: (VPADDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [2] y x)) + // match: (MaskedAddUint64x8 x y mask) + // result: (VPADDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [2] y x)) + // match: (MaskedAddUint8x16 x y mask) + // result: (VPADDBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [2] y x)) + // match: (MaskedAddUint8x32 x y mask) + // result: (VPADDBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true 
} } -func rewriteValueAMD64_OpLessEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedAddUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [2] y x)) + // match: (MaskedAddUint8x64 x y mask) + // result: (VPADDBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPADDBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [2] y x)) + // match: (MaskedAndFloat32x16 x y mask) + // result: (VANDPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [2] y x)) + // match: (MaskedAndFloat32x4 x y mask) + // result: (VANDPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [2] y x)) + // match: (MaskedAndFloat32x8 x y mask) + // result: (VANDPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [2] y x)) + // match: (MaskedAndFloat64x2 x y mask) + // result: (VANDPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - 
v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [2] y x)) + // match: (MaskedAndFloat64x4 x y mask) + // result: (VANDPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [1] y x)) + // match: (MaskedAndFloat64x8 x y mask) + // result: (VANDPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat32x4 x y) - // result: (VCMPPS128 [1] y x) + b := v.Block + // match: (MaskedAndInt32x16 x y mask) + // result: (VPANDDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPANDDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat32x8 x y) - // result: (VCMPPS256 [1] y x) + b := v.Block + // match: (MaskedAndInt32x4 x y mask) + // result: (VPANDDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPANDDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat64x2 x y) - // result: (VCMPPD128 [1] y x) + b := v.Block + // match: (MaskedAndInt32x8 x y mask) + // result: (VPANDDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPANDDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + 
v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (LessFloat64x4 x y) - // result: (VCMPPD256 [1] y x) + b := v.Block + // match: (MaskedAndInt64x2 x y mask) + // result: (VPANDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(1) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPANDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [1] y x)) + // match: (MaskedAndInt64x4 x y mask) + // result: (VPANDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [1] y x)) + // match: (MaskedAndInt64x8 x y mask) + // result: (VPANDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPANDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [1] y x)) + // match: (MaskedAndNotFloat32x16 x y mask) + // result: (VANDNPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDNPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpLessInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedAndNotFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (LessInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [1] y x)) + // match: (MaskedAndNotFloat32x4 x y mask) + // result: (VANDNPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VANDNPSMasked128) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessInt32x16(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndNotFloat32x8(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessInt32x16 x y)
-	// result: (VPMOVMToVec32x16 (VPCMPD512 [1] y x))
+	// match: (MaskedAndNotFloat32x8 x y mask)
+	// result: (VANDNPSMasked256 x y (VPMOVVec32x8ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec32x16)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VANDNPSMasked256)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessInt32x4(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndNotFloat64x2(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessInt32x4 x y)
-	// result: (VPMOVMToVec32x4 (VPCMPD128 [1] y x))
+	// match: (MaskedAndNotFloat64x2 x y mask)
+	// result: (VANDNPDMasked128 x y (VPMOVVec64x2ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec32x4)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VANDNPDMasked128)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessInt32x8(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndNotFloat64x4(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessInt32x8 x y)
-	// result: (VPMOVMToVec32x8 (VPCMPD256 [1] y x))
+	// match: (MaskedAndNotFloat64x4 x y mask)
+	// result: (VANDNPDMasked256 x y (VPMOVVec64x4ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec32x8)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VANDNPDMasked256)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessInt64x2(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndNotFloat64x8(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessInt64x2 x y)
-	// result: (VPMOVMToVec64x2 (VPCMPQ128 [1] y x))
+	// match: (MaskedAndNotFloat64x8 x y mask)
+	// result: (VANDNPDMasked512 x y (VPMOVVec64x8ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec64x2)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VANDNPDMasked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessInt64x4(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndNotInt32x16(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessInt64x4 x y)
-	// result: (VPMOVMToVec64x4 (VPCMPQ256 [1] y x))
+	// match: (MaskedAndNotInt32x16 x y mask)
+	// result: (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec64x4)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VPANDNDMasked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessInt64x8(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndNotInt32x4(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessInt64x8 x y)
-	// result: (VPMOVMToVec64x8 (VPCMPQ512 [1] y x))
+	// match: (MaskedAndNotInt32x4 x y mask)
+	// result: (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec64x8)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VPANDNDMasked128)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessInt8x16(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndNotInt32x8(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessInt8x16 x y)
-	// result: (VPMOVMToVec8x16 (VPCMPB128 [1] y x))
+	// match: (MaskedAndNotInt32x8 x y mask)
+	// result: (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec8x16)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VPANDNDMasked256)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessInt8x32(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndNotInt64x2(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessInt8x32 x y)
-	// result: (VPMOVMToVec8x32 (VPCMPB256 [1] y x))
+	// match: (MaskedAndNotInt64x2 x y mask)
+	// result: (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec8x32)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VPANDNQMasked128)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessInt8x64(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndNotInt64x4(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessInt8x64 x y)
-	// result: (VPMOVMToVec8x64 (VPCMPB512 [1] y x))
+	// match: (MaskedAndNotInt64x4 x y mask)
+	// result: (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec8x64)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VPANDNQMasked256)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessUint16x16(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndNotInt64x8(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessUint16x16 x y)
-	// result: (VPMOVMToVec16x16 (VPCMPUW256 [1] y x))
+	// match: (MaskedAndNotInt64x8 x y mask)
+	// result: (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec16x16)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpLessUint16x32(v *Value) bool {
+		mask := v_2
+		v.reset(OpAMD64VPANDNQMasked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
+		return true
+	}
+}
+func rewriteValueAMD64_OpMaskedAndNotUint32x16(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessUint16x32 x y)
-	// result: (VPMOVMToVec16x32 (VPCMPUW512 [1] y x))
+	// match: (MaskedAndNotUint32x16 x y mask)
+	// result: (VPANDNDMasked512 x y (VPMOVVec32x16ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec16x32)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VPANDNDMasked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessUint16x8(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndNotUint32x4(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessUint16x8 x y)
-	// result: (VPMOVMToVec16x8 (VPCMPUW128 [1] y x))
+	// match: (MaskedAndNotUint32x4 x y mask)
+	// result: (VPANDNDMasked128 x y (VPMOVVec32x4ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec16x8)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VPANDNDMasked128)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessUint32x16(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndNotUint32x8(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessUint32x16 x y)
-	// result: (VPMOVMToVec32x16 (VPCMPUD512 [1] y x))
+	// match: (MaskedAndNotUint32x8 x y mask)
+	// result: (VPANDNDMasked256 x y (VPMOVVec32x8ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec32x16)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VPANDNDMasked256)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessUint32x4(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndNotUint64x2(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessUint32x4 x y)
-	// result: (VPMOVMToVec32x4 (VPCMPUD128 [1] y x))
+	// match: (MaskedAndNotUint64x2 x y mask)
+	// result: (VPANDNQMasked128 x y (VPMOVVec64x2ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec32x4)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VPANDNQMasked128)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessUint32x8(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndNotUint64x4(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessUint32x8 x y)
-	// result: (VPMOVMToVec32x8 (VPCMPUD256 [1] y x))
+	// match: (MaskedAndNotUint64x4 x y mask)
+	// result: (VPANDNQMasked256 x y (VPMOVVec64x4ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec32x8)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VPANDNQMasked256)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessUint64x2(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndNotUint64x8(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessUint64x2 x y)
-	// result: (VPMOVMToVec64x2 (VPCMPUQ128 [1] y x))
+	// match: (MaskedAndNotUint64x8 x y mask)
+	// result: (VPANDNQMasked512 x y (VPMOVVec64x8ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec64x2)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VPANDNQMasked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessUint64x4(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndUint32x16(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessUint64x4 x y)
-	// result: (VPMOVMToVec64x4 (VPCMPUQ256 [1] y x))
+	// match: (MaskedAndUint32x16 x y mask)
+	// result: (VPANDDMasked512 x y (VPMOVVec32x16ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec64x4)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VPANDDMasked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
	}
 }
-func rewriteValueAMD64_OpLessUint64x8(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndUint32x4(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessUint64x8 x y)
-	// result: (VPMOVMToVec64x8 (VPCMPUQ512 [1] y x))
+	// match: (MaskedAndUint32x4 x y mask)
+	// result: (VPANDDMasked128 x y (VPMOVVec32x4ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec64x8)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VPANDDMasked128)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessUint8x16(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndUint32x8(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessUint8x16 x y)
-	// result: (VPMOVMToVec8x16 (VPCMPUB128 [1] y x))
+	// match: (MaskedAndUint32x8 x y mask)
+	// result: (VPANDDMasked256 x y (VPMOVVec32x8ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec8x16)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VPANDDMasked256)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessUint8x32(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndUint64x2(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessUint8x32 x y)
-	// result: (VPMOVMToVec8x32 (VPCMPUB256 [1] y x))
+	// match: (MaskedAndUint64x2 x y mask)
+	// result: (VPANDQMasked128 x y (VPMOVVec64x2ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec8x32)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VPANDQMasked128)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLessUint8x64(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndUint64x4(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LessUint8x64 x y)
-	// result: (VPMOVMToVec8x64 (VPCMPUB512 [1] y x))
+	// match: (MaskedAndUint64x4 x y mask)
+	// result: (VPANDQMasked256 x y (VPMOVVec64x4ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		v.reset(OpAMD64VPMOVMToVec8x64)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask)
-		v0.AuxInt = int8ToAuxInt(1)
-		v0.AddArg2(y, x)
-		v.AddArg(v0)
+		mask := v_2
+		v.reset(OpAMD64VPANDQMasked256)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
 }
-func rewriteValueAMD64_OpLoad(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAndUint64x8(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
-	// match: (Load ptr mem)
-	// cond: (is64BitInt(t) || isPtr(t))
-	// result: (MOVQload ptr mem)
+	b := v.Block
+	// match: (MaskedAndUint64x8 x y mask)
+	// result: (VPANDQMasked512 x y (VPMOVVec64x8ToM mask))
 	for {
-		t := v.Type
-		ptr := v_0
-		mem := v_1
-		if !(is64BitInt(t) || isPtr(t)) {
-			break
-		}
-		v.reset(OpAMD64MOVQload)
-		v.AddArg2(ptr, mem)
+		x := v_0
+		y := v_1
+		mask := v_2
+		v.reset(OpAMD64VPANDQMasked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
-	// match: (Load ptr mem)
-	// cond: is32BitInt(t)
-	// result: (MOVLload ptr mem)
+}
+func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x16(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (MaskedApproximateReciprocalFloat32x16 x mask)
+	// result: (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask))
 	for {
-		t := v.Type
-		ptr := v_0
-		mem := v_1
-		if !(is32BitInt(t)) {
-			break
-		}
-		v.reset(OpAMD64MOVLload)
-		v.AddArg2(ptr, mem)
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VRCP14PSMasked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
 		return true
 	}
-	// match: (Load ptr mem)
-	// cond: is16BitInt(t)
-	// result: (MOVWload ptr mem)
+}
+func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x4(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (MaskedApproximateReciprocalFloat32x4 x mask)
+	// result: (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask))
 	for {
-		t := v.Type
-		ptr := v_0
-		mem := v_1
-		if !(is16BitInt(t)) {
-			break
-		}
-		v.reset(OpAMD64MOVWload)
-		v.AddArg2(ptr, mem)
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VRCP14PSMasked128)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
 		return true
 	}
-	// match: (Load ptr mem)
-	// cond: (t.IsBoolean() || is8BitInt(t))
-	// result: (MOVBload ptr mem)
+}
+func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x8(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (MaskedApproximateReciprocalFloat32x8 x mask)
+	// result: (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask))
 	for {
-		t := v.Type
-		ptr := v_0
-		mem := v_1
-		if !(t.IsBoolean() || is8BitInt(t)) {
-			break
-		}
-		v.reset(OpAMD64MOVBload)
-		v.AddArg2(ptr, mem)
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VRCP14PSMasked256)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
 		return true
 	}
-	// match: (Load ptr mem)
-	// cond: is32BitFloat(t)
-	// result: (MOVSSload ptr mem)
+}
+func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x2(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (MaskedApproximateReciprocalFloat64x2 x mask)
+	// result: (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask))
 	for {
-		t := v.Type
-		ptr := v_0
-		mem := v_1
-		if !(is32BitFloat(t)) {
-			break
-		}
-		v.reset(OpAMD64MOVSSload)
-		v.AddArg2(ptr, mem)
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VRCP14PDMasked128)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
 		return true
 	}
-	// match: (Load ptr mem)
-	// cond: is64BitFloat(t)
-	// result: (MOVSDload ptr mem)
+}
+func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x4(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (MaskedApproximateReciprocalFloat64x4 x mask)
+	// result: (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask))
 	for {
-		t := v.Type
-		ptr := v_0
-		mem := v_1
-		if !(is64BitFloat(t)) {
-			break
-		}
-		v.reset(OpAMD64MOVSDload)
-		v.AddArg2(ptr, mem)
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VRCP14PDMasked256)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
 		return true
 	}
-	// match: (Load ptr mem)
-	// cond: t.Size() == 16
-	// result: (VMOVDQUload128 ptr mem)
+}
+func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x8(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (MaskedApproximateReciprocalFloat64x8 x mask)
+	// result: (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask))
 	for {
-		t := v.Type
-		ptr := v_0
-		mem := v_1
-		if !(t.Size() == 16) {
-			break
-		}
-		v.reset(OpAMD64VMOVDQUload128)
-		v.AddArg2(ptr, mem)
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VRCP14PDMasked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
 		return true
 	}
-	// match: (Load ptr mem)
-	// cond: t.Size() == 32
-	// result: (VMOVDQUload256 ptr mem)
+}
+func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x16(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (MaskedApproximateReciprocalOfSqrtFloat32x16 x mask)
+	// result: (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask))
 	for {
-		t := v.Type
-		ptr := v_0
-		mem := v_1
-		if !(t.Size() == 32) {
-			break
-		}
-		v.reset(OpAMD64VMOVDQUload256)
-		v.AddArg2(ptr, mem)
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VRSQRT14PSMasked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
 		return true
 	}
-	// match: (Load ptr mem)
-	// cond: t.Size() == 64
-	// result: (VMOVDQUload512 ptr mem)
+}
+func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x4(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (MaskedApproximateReciprocalOfSqrtFloat32x4 x mask)
+	// result: (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask))
 	for {
-		t := v.Type
-		ptr := v_0
-		mem := v_1
-		if !(t.Size() == 64) {
-			break
-		}
-		v.reset(OpAMD64VMOVDQUload512)
-		v.AddArg2(ptr, mem)
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VRSQRT14PSMasked128)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
 		return true
 	}
-	return false
 }
-func rewriteValueAMD64_OpLocalAddr(v *Value) bool {
+func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x8(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (LocalAddr {sym} base mem)
-	// cond: t.Elem().HasPointers()
-	// result: (LEAQ {sym} (SPanchored base mem))
+	// match: (MaskedApproximateReciprocalOfSqrtFloat32x8 x mask)
+	// result: (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask))
 	for {
-		t := v.Type
-		sym := auxToSym(v.Aux)
-		base := v_0
-		mem := v_1
-		if !(t.Elem().HasPointers()) {
-			break
-		}
-		v.reset(OpAMD64LEAQ)
-		v.Aux = symToAux(sym)
-		v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
-		v0.AddArg2(base, mem)
-		v.AddArg(v0)
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VRSQRT14PSMasked256)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
 		return true
 	}
-	// match: (LocalAddr {sym} base _)
-	// cond: !t.Elem().HasPointers()
-	// result: (LEAQ {sym} base)
+}
+func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x2(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (MaskedApproximateReciprocalOfSqrtFloat64x2 x mask)
+	// result: (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask))
 	for {
-		t := v.Type
-		sym := auxToSym(v.Aux)
-		base := v_0
-		if !(!t.Elem().HasPointers()) {
-			break
-		}
-		v.reset(OpAMD64LEAQ)
-		v.Aux = symToAux(sym)
-		v.AddArg(base)
+		x := v_0
+		mask := v_1
+		v.reset(OpAMD64VRSQRT14PDMasked128)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
 		return true
 	}
-	return false
 }
-func rewriteValueAMD64_OpLsh16x16(v *Value) bool {
+func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x4(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	// match: (Lsh16x16 x y)
-	// cond: !shiftIsBounded(v)
-	// result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32])))
+	// match: (MaskedApproximateReciprocalOfSqrtFloat64x4 x mask)
+	// result: (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask))
 	for {
-		t := v.Type
 		x := v_0
-		y := v_1
-		if !(!shiftIsBounded(v)) {
-			break
-		}
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
-		v0.AddArg2(x, y)
-		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
-		v2.AuxInt = int16ToAuxInt(32)
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg2(v0, v1)
+		mask := v_1
+		v.reset(OpAMD64VRSQRT14PDMasked256)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
 		return true
 	}
-	// match: (Lsh16x16 x y)
-	// cond: shiftIsBounded(v)
-	// result: (SHLL x y)
+}
+func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x8(v *Value) bool {
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (MaskedApproximateReciprocalOfSqrtFloat64x8 x mask)
+	// result: (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask))
 	for {
 		x := v_0
-		y := v_1
-		if !(shiftIsBounded(v)) {
-			break
-		}
-		v.reset(OpAMD64SHLL)
-		v.AddArg2(x, y)
+		mask := v_1
+		v.reset(OpAMD64VRSQRT14PDMasked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg2(x, v0)
 		return true
 	}
-	return false
 }
-func rewriteValueAMD64_OpLsh16x32(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAverageUint16x16(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	// match: (Lsh16x32 x y)
-	// cond: !shiftIsBounded(v)
-	// result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32])))
+	// match: (MaskedAverageUint16x16 x y mask)
+	// result: (VPAVGWMasked256 x y (VPMOVVec16x16ToM mask))
 	for {
-		t := v.Type
 		x := v_0
 		y := v_1
-		if !(!shiftIsBounded(v)) {
-			break
-		}
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
-		v0.AddArg2(x, y)
-		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
-		v2.AuxInt = int32ToAuxInt(32)
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg2(v0, v1)
+		mask := v_2
+		v.reset(OpAMD64VPAVGWMasked256)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
-	// match: (Lsh16x32 x y)
-	// cond: shiftIsBounded(v)
-	// result: (SHLL x y)
+}
+func rewriteValueAMD64_OpMaskedAverageUint16x32(v *Value) bool {
+	v_2 := v.Args[2]
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (MaskedAverageUint16x32 x y mask)
+	// result: (VPAVGWMasked512 x y (VPMOVVec16x32ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		if !(shiftIsBounded(v)) {
-			break
-		}
-		v.reset(OpAMD64SHLL)
-		v.AddArg2(x, y)
+		mask := v_2
+		v.reset(OpAMD64VPAVGWMasked512)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
-	return false
 }
-func rewriteValueAMD64_OpLsh16x64(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAverageUint16x8(v *Value) bool {
+	v_2 := v.Args[2]
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	// match: (Lsh16x64 x y)
-	// cond: !shiftIsBounded(v)
-	// result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32])))
+	// match: (MaskedAverageUint16x8 x y mask)
+	// result: (VPAVGWMasked128 x y (VPMOVVec16x8ToM mask))
 	for {
-		t := v.Type
 		x := v_0
 		y := v_1
-		if !(!shiftIsBounded(v)) {
-			break
-		}
-		v.reset(OpAMD64ANDL)
-		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
-		v0.AddArg2(x, y)
-		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
-		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
-		v2.AuxInt = int32ToAuxInt(32)
-		v2.AddArg(y)
-		v1.AddArg(v2)
-		v.AddArg2(v0, v1)
+		mask := v_2
+		v.reset(OpAMD64VPAVGWMasked128)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
-	// match: (Lsh16x64 x y)
-	// cond: shiftIsBounded(v)
-	// result: (SHLL x y)
+}
+func rewriteValueAMD64_OpMaskedAverageUint8x16(v *Value) bool {
+	v_2 := v.Args[2]
+	v_1 := v.Args[1]
+	v_0 := v.Args[0]
+	b := v.Block
+	// match: (MaskedAverageUint8x16 x y mask)
+	// result: (VPAVGBMasked128 x y (VPMOVVec8x16ToM mask))
 	for {
 		x := v_0
 		y := v_1
-		if !(shiftIsBounded(v)) {
-			break
-		}
-		v.reset(OpAMD64SHLL)
-		v.AddArg2(x, y)
+		mask := v_2
+		v.reset(OpAMD64VPAVGBMasked128)
+		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
+		v0.AddArg(mask)
+		v.AddArg3(x, y, v0)
 		return true
 	}
-	return false
 }
-func rewriteValueAMD64_OpLsh16x8(v *Value) bool {
+func rewriteValueAMD64_OpMaskedAverageUint8x32(v *Value) bool {
+	v_2
:= v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh16x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + // match: (MaskedAverageUint8x32 x y mask) + // result: (VPAVGBMasked256 x y (VPMOVVec8x32ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPAVGBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Lsh16x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedAverageUint8x64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedAverageUint8x64 x y mask) + // result: (VPAVGBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPAVGBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedDivFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) + // match: (MaskedDivFloat32x16 x y mask) + // result: (VDIVPSMasked512 x y (VPMOVVec32x16ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VDIVPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Lsh32x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedDivFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat32x4 x y mask) + // result: (VDIVPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VDIVPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedDivFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) + // match: (MaskedDivFloat32x8 x y mask) + // result: (VDIVPSMasked256 x y (VPMOVVec32x8ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := 
b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VDIVPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Lsh32x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedDivFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat64x2 x y mask) + // result: (VDIVPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VDIVPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedDivFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) + // match: (MaskedDivFloat64x4 x y mask) + // result: (VDIVPDMasked256 x y (VPMOVVec64x4ToM mask)) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VDIVPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - // match: (Lsh32x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedDivFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedDivFloat64x8 x y mask) + // result: (VDIVPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VDIVPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpLsh32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh32x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [0] x y (VPMOVVec32x16ToM mask))) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) 
+ v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - // match: (Lsh32x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedEqualFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [0] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPWconst y [64]))) + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [0] x y (VPMOVVec32x8ToM mask))) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - // match: (Lsh64x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpMaskedEqualFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [0] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPLconst y [64]))) + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [0] x y (VPMOVVec64x4ToM mask))) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + 
v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - // match: (Lsh64x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpMaskedEqualFloat64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [0] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPQconst y [64]))) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPEQWMasked256 x y (VPMOVVec16x16ToM mask))) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - // match: (Lsh64x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) +} +func rewriteValueAMD64_OpMaskedEqualInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPEQWMasked512 x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh64x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPEQWMasked128 x y (VPMOVVec16x8ToM mask))) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, 
types.TypeFlags) - v2.AuxInt = int8ToAuxInt(64) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - // match: (Lsh64x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLQ x y) - for { - x := v_0 - y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLQ) - v.AddArg2(x, y) - return true - } - return false } -func rewriteValueAMD64_OpLsh8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x16 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags) - v2.AuxInt = int16ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh8x16 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPEQDMasked512 x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQDMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x32 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh8x32 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPEQDMasked128 x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQDMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x64 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPQconst y [32]))) - for { - t := v.Type - x := v_0 - y := v_1 - if !(!shiftIsBounded(v)) { - break - } - 
v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags) - v2.AuxInt = int32ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) - return true - } - // match: (Lsh8x64 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPEQDMasked256 x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQDMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpLsh8x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Lsh8x8 x y) - // cond: !shiftIsBounded(v) - // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPEQQMasked128 x y (VPMOVVec64x2ToM mask))) for { - t := v.Type x := v_0 y := v_1 - if !(!shiftIsBounded(v)) { - break - } - v.reset(OpAMD64ANDL) - v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t) - v0.AddArg2(x, y) - v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t) - v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags) - v2.AuxInt = int8ToAuxInt(32) - v2.AddArg(y) - v1.AddArg(v2) - v.AddArg2(v0, v1) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - // match: (Lsh8x8 x y) - // cond: shiftIsBounded(v) - // result: (SHLL x y) +} +func rewriteValueAMD64_OpMaskedEqualInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPEQQMasked256 x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 - if !(shiftIsBounded(v)) { - break - } - v.reset(OpAMD64SHLL) - v.AddArg2(x, y) + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } - return false } -func rewriteValueAMD64_OpMaskedAbsoluteInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt16x16 x mask) - // result: (VPABSWMasked256 x (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPEQQMasked512 x y (VPMOVVec64x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return 
true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt16x32 x mask) - // result: (VPABSWMasked512 x (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPEQBMasked128 x y (VPMOVVec8x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQBMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt16x8 x mask) - // result: (VPABSWMasked128 x (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPEQBMasked256 x y (VPMOVVec8x32ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQBMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualInt8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt32x16 x mask) - // result: (VPABSDMasked512 x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPEQBMasked512 x y (VPMOVVec8x64ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQBMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt32x4 x mask) - // result: (VPABSDMasked128 x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] x y (VPMOVVec16x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpMaskedAbsoluteInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt32x8 x mask) - // result: (VPABSDMasked256 x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] x y (VPMOVVec16x32ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt64x2 x mask) - // result: (VPABSQMasked128 x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] x y (VPMOVVec16x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt64x4 x mask) - // result: (VPABSQMasked256 x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] x y (VPMOVVec32x16ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt64x8 x mask) - // result: (VPABSQMasked512 x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] x y (VPMOVVec32x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + 
v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt8x16 x mask) - // result: (VPABSBMasked128 x (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] x y (VPMOVVec32x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt8x32 x mask) - // result: (VPABSBMasked256 x (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] x y (VPMOVVec64x2ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAbsoluteInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAbsoluteInt8x64 x mask) - // result: (VPABSBMasked512 x (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] x y (VPMOVVec64x4ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VPABSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat32x16 x y mask) - // result: (VADDPSMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat32x4 x y mask) - // result: (VADDPSMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat32x8 x y mask) - // result: (VADDPSMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedEqualUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat64x2 x y mask) - // result: (VADDPDMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat64x4 x y mask) - // result: (VADDPDMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [5] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + 
v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddFloat64x8 x y mask) - // result: (VADDPDMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [5] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VADDPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt16x16 x y mask) - // result: (VPADDWMasked256 y x (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [5] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt16x32 x y mask) - // result: (VPADDWMasked512 y x (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [5] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt16x8 x y mask) - // result: (VPADDWMasked128 y x (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [5] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, 
typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt32x16 x y mask) - // result: (VPADDDMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [5] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt32x4 x y mask) - // result: (VPADDDMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [5] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt32x8 x y mask) - // result: (VPADDDMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [5] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt64x2 x y mask) - // result: (VPADDQMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [5] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt64x4 x y mask) - // result: (VPADDQMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [5] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt64x8 x y mask) - // result: (VPADDQMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [5] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt8x16 x y mask) - // result: (VPADDBMasked128 y x (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [5] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt8x32 x y mask) - // result: (VPADDBMasked256 y x (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [5] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + 
v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddInt8x64 x y mask) - // result: (VPADDBMasked512 y x (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [5] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint16x16 x y mask) - // result: (VPADDWMasked256 y x (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [5] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint16x32 x y mask) - // result: (VPADDWMasked512 y x (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [5] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint16x8 x y mask) - // result: (VPADDWMasked128 y x (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [5] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) 
- v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint32x16 x y mask) - // result: (VPADDDMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [5] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint32x4 x y mask) - // result: (VPADDDMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint32x8 x y mask) - // result: (VPADDDMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint64x2 x y mask) - // result: (VPADDQMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDQMasked128) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint64x4 x y mask) - // result: (VPADDQMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint64x8 x y mask) - // result: (VPADDQMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint8x16 x y mask) - // result: (VPADDBMasked128 y x (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint8x32 x y mask) - // result: (VPADDBMasked256 y x (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - 
v.reset(OpAMD64VPADDBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAddUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAddUint8x64 x y mask) - // result: (VPADDBMasked512 y x (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPADDBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndFloat32x16 x y mask) - // result: (VANDPSMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndFloat32x4 x y mask) - // result: (VANDPSMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndFloat32x8 x y mask) - // result: (VANDPSMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] x y (VPMOVVec8x32ToM 
mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterEqualUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndFloat64x2 x y mask) - // result: (VANDPDMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(5) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndFloat64x4 x y mask) - // result: (VANDPDMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [6] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndFloat64x8 x y mask) - // result: (VANDPDMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [6] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt32x16 x y mask) - // result: (VPANDDMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [6] x y 
(VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt32x4 x y mask) - // result: (VPANDDMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [6] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt32x8 x y mask) - // result: (VPANDDMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [6] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt64x2 x y mask) - // result: (VPANDQMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [6] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt64x4 x y mask) - // result: (VPANDQMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPGTWMasked256 x y 
(VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndInt64x8 x y mask) - // result: (VPANDQMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPGTWMasked512 x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotFloat32x16 x y mask) - // result: (VANDNPSMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPGTWMasked128 x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDNPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotFloat32x4 x y mask) - // result: (VANDNPSMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPGTDMasked512 x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDNPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTDMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotFloat32x8 x y mask) - // result: (VANDNPSMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPGTDMasked128 x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDNPSMasked256) 
- v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTDMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotFloat64x2 x y mask) - // result: (VANDNPDMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPGTDMasked256 x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDNPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTDMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotFloat64x4 x y mask) - // result: (VANDNPDMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPGTQMasked128 x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDNPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotFloat64x8 x y mask) - // result: (VANDNPDMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPGTQMasked256 x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VANDNPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt32x16 x y mask) - // result: (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPGTQMasked512 x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, 
v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt32x4 x y mask) - // result: (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPGTBMasked128 x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTBMasked128, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt32x8 x y mask) - // result: (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPGTBMasked256 x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTBMasked256, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt64x2 x y mask) - // result: (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPGTBMasked512 x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTBMasked512, typ.Mask) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt64x4 x y mask) - // result: (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + 
v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotInt64x8 x y mask) - // result: (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint32x16 x y mask) - // result: (VPANDNDMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint32x4 x y mask) - // result: (VPANDNDMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint32x8 x y mask) - // result: (VPANDNDMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint64x2 x y mask) - // result: (VPANDNQMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint64x4 x y mask) - // result: (VPANDNQMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndNotUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndNotUint64x8 x y mask) - // result: (VPANDNQMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDNQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint32x16 x y mask) - // result: (VPANDDMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + 
v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint32x4 x y mask) - // result: (VPANDDMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint32x8 x y mask) - // result: (VPANDDMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedGreaterUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint64x2 x y mask) - // result: (VPANDQMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedGreaterUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(6) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedIsNanFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint64x4 x y mask) - // result: (VPANDQMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [3] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + 
v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAndUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedIsNanFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAndUint64x8 x y mask) - // result: (VPANDQMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [3] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPANDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedIsNanFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat32x16 x mask) - // result: (VRCP14PSMasked512 x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [3] x y (VPMOVVec32x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedIsNanFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat32x4 x mask) - // result: (VRCP14PSMasked128 x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [3] x y (VPMOVVec64x2ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedIsNanFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat32x8 x mask) - // result: (VRCP14PSMasked256 x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [3] x y (VPMOVVec64x4ToM mask))) for { x := v_0 - mask := v_1 - 
v.reset(OpAMD64VRCP14PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedIsNanFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedApproximateReciprocalFloat64x2 x mask) - // result: (VRCP14PDMasked128 x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedIsNanFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [3] x y (VPMOVVec64x8ToM mask))) for { x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalFloat64x4 x mask) - // result: (VRCP14PDMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalFloat64x8 x mask) - // result: (VRCP14PDMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRCP14PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat32x16 x mask) - // result: (VRSQRT14PSMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat32x4 x mask) - // result: (VRSQRT14PSMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat32x8 x mask) - // result: (VRSQRT14PSMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedApproximateReciprocalOfSqrtFloat64x2 x mask) - // result: (VRSQRT14PDMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat64x4 x mask) - // result: (VRSQRT14PDMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedApproximateReciprocalOfSqrtFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedApproximateReciprocalOfSqrtFloat64x8 x mask) - // result: (VRSQRT14PDMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VRSQRT14PDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(3) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint16x16 x y mask) - // result: (VPAVGWMasked256 y x (VPMOVVec16x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [2] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPAVGWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint16x32 x y mask) - // result: (VPAVGWMasked512 y x (VPMOVVec16x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [2] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPAVGWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint16x8 x y mask) - // 
result: (VPAVGWMasked128 y x (VPMOVVec16x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [2] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPAVGWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint8x16 x y mask) - // result: (VPAVGBMasked128 y x (VPMOVVec8x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [2] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPAVGBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint8x32 x y mask) - // result: (VPAVGBMasked256 y x (VPMOVVec8x32ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [2] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPAVGBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedAverageUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedAverageUint8x64 x y mask) - // result: (VPAVGBMasked512 y x (VPMOVVec8x64ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [2] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPAVGBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(MaskedDivFloat32x16 x y mask) - // result: (VDIVPSMasked512 y x (VPMOVVec32x16ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [2] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat32x4 x y mask) - // result: (VDIVPSMasked128 y x (VPMOVVec32x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [2] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat32x8 x y mask) - // result: (VDIVPSMasked256 y x (VPMOVVec32x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [2] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat64x2 x y mask) - // result: (VDIVPDMasked128 y x (VPMOVVec64x2ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [2] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b 
:= v.Block - // match: (MaskedDivFloat64x4 x y mask) - // result: (VDIVPDMasked256 y x (VPMOVVec64x4ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [2] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedDivFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedDivFloat64x8 x y mask) - // result: (VDIVPDMasked512 y x (VPMOVVec64x8ToM mask)) + typ := &b.Func.Config.Types + // match: (MaskedLessEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [2] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VDIVPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [0] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedLessEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [2] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [0] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedLessEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [2] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) 
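	// For reference (informational comment, not produced by the rule
	// generator): the AuxInt set above is the AVX-512 VPCMP predicate
	// immediate. For the integer compare rules in this file the encoding is
	// 0=EQ, 1=LT, 2=LE, 4=NEQ, 5=NLT (i.e. >=), 6=NLE (i.e. >), so the
	// LessEqual rules emit [2] with the operands kept in source order (x, y).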
return true } } -func rewriteValueAMD64_OpMaskedEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [0] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedLessEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [2] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [0] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedLessEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [2] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [0] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedLessEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [2] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [0] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedLessEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [2] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, 
OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPEQWMasked256 y x (VPMOVVec16x16ToM mask))) + // match: (MaskedLessEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPEQWMasked512 y x (VPMOVVec16x32ToM mask))) + // match: (MaskedLessEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPEQWMasked128 y x (VPMOVVec16x8ToM mask))) + // match: (MaskedLessEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQWMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [0] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedLessEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [0] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedLessEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [0] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedLessEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPEQQMasked128 y x (VPMOVVec64x2ToM mask))) + // match: (MaskedLessEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked128, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPEQQMasked256 y x (VPMOVVec64x4ToM mask))) + // match: (MaskedLessEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked256, typ.Mask) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPEQQMasked512 y x (VPMOVVec64x8ToM mask))) + // match: (MaskedLessEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPEQQMasked512, typ.Mask) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [0] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedLessEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [0] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedLessEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessEqualUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [0] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedLessEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(2) v1 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [0] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedLessFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [1] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [0] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedLessFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [1] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [0] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedLessFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [1] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [0] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedLessFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [1] x y 
(VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [0] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedLessFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [1] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [0] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedLessFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [1] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [0] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedLessInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [1] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint64x4(v *Value) bool { +func 
rewriteValueAMD64_OpMaskedLessInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [0] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedLessInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [1] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [0] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedLessInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [1] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [0] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedLessInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [1] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [0] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedLessInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [1] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + 
v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [0] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedLessInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [1] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [5] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedLessInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [1] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [5] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedLessInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [1] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 
(VCMPPSMasked256 [5] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedLessInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [1] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [5] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedLessInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [1] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [5] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedLessInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [1] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [5] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedLessInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [1] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) 
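	// For reference (informational, not generated): after lowering, a masked
	// byte compare like this one should become roughly a three-instruction
	// sequence, assuming AVX-512BW (Intel syntax; register names are
	// illustrative):
	//
	//	VPMOVB2M k1, zmm_mask             ; vector mask -> opmask register
	//	VPCMPB   k2{k1}, zmm_x, zmm_y, 1  ; signed x < y, masked by k1
	//	VPMOVM2B zmm_res, k2              ; opmask -> all-ones/all-zero lanes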
v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [5] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedLessUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [5] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedLessUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [5] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedLessUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [5] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedLessUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, 
types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [5] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedLessUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [5] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedLessUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [5] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedLessUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [5] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedLessUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) 
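	// For reference (informational, not generated): the unsigned compare
	// rules select the VPCMPU* opcode family (VPCMPUB/UW/UD/UQ) instead of
	// VPCMP*, but the predicate immediates are shared between the signed and
	// unsigned forms, so Less is [1] and LessEqual is [2] in both.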
v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [5] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedLessUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [5] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedLessUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [5] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedLessUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedLessUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [5] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedLessUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(1) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) - 
v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [5] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedMaxFloat32x16 x y mask) + // result: (VMAXPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMAXPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [5] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedMaxFloat32x4 x y mask) + // result: (VMAXPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMAXPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [5] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedMaxFloat32x8 x y mask) + // result: (VMAXPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMAXPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [5] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMaxFloat64x2 x y mask) + // result: (VMAXPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + 
v.reset(OpAMD64VMAXPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [5] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMaxFloat64x4 x y mask) + // result: (VMAXPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMAXPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [5] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMaxFloat64x8 x y mask) + // result: (VMAXPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMAXPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [5] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMaxInt16x16 x y mask) + // result: (VPMAXSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [5] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMaxInt16x32 x y mask) + // result: (VPMAXSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - 
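	// For reference (informational, not generated): unlike the compare rules
	// above, the masked Max/Min rules map to a single masked instruction
	// (here VPMAXSW with a K-register operand), so only the mask makes the
	// vector-to-opmask round trip via VPMOVVec16x32ToM and the result stays
	// in a vector register throughout.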
v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [5] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMaxInt16x8 x y mask) + // result: (VPMAXSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [5] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedMaxInt32x16 x y mask) + // result: (VPMAXSDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [5] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedMaxInt32x4 x y mask) + // result: (VPMAXSDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [5] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedMaxInt32x8 x y mask) + // result: (VPMAXSDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(5) - v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [6] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMaxInt64x2 x y mask) + // result: (VPMAXSQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [6] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMaxInt64x4 x y mask) + // result: (VPMAXSQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [6] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMaxInt64x8 x y mask) + // result: (VPMAXSQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [6] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMaxInt8x16 x y mask) + // result: (VPMAXSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [6] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMaxInt8x32 x y mask) + // result: (VPMAXSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [6] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMaxInt8x64 x y mask) + // result: (VPMAXSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPGTWMasked256 y x (VPMOVVec16x16ToM mask))) + // match: (MaskedMaxUint16x16 x y mask) + // result: (VPMAXUWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked256, typ.Mask) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPGTWMasked512 y x (VPMOVVec16x32ToM mask))) + // match: (MaskedMaxUint16x32 x y mask) + // result: (VPMAXUWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked512, typ.Mask) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - 
v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPGTWMasked128 y x (VPMOVVec16x8ToM mask))) + // match: (MaskedMaxUint16x8 x y mask) + // result: (VPMAXUWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTWMasked128, typ.Mask) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [6] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMaxUint32x16 x y mask) + // result: (VPMAXUDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [6] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMaxUint32x4 x y mask) + // result: (VPMAXUDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [6] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMaxUint32x8 x y mask) + // result: (VPMAXUDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, 
x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPGTQMasked128 y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMaxUint64x2 x y mask) + // result: (VPMAXUQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked128, typ.Mask) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPGTQMasked256 y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMaxUint64x4 x y mask) + // result: (VPMAXUQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked256, typ.Mask) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPGTQMasked512 y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMaxUint64x8 x y mask) + // result: (VPMAXUQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPGTQMasked512, typ.Mask) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [6] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedMaxUint8x16 x y mask) + // result: (VPMAXUBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUBMasked128) + v0 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [6] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedMaxUint8x32 x y mask) + // result: (VPMAXUBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedMaxUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [6] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedMaxUint8x64 x y mask) + // result: (VPMAXUBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMAXUBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [6] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedMinFloat32x16 x y mask) + // result: (VMINPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMINPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [6] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedMinFloat32x4 x y mask) + // result: (VMINPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMINPSMasked128) + v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [6] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedMinFloat32x8 x y mask) + // result: (VMINPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMINPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [6] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMinFloat64x2 x y mask) + // result: (VMINPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMINPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [6] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMinFloat64x4 x y mask) + // result: (VMINPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMINPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [6] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMinFloat64x8 x y mask) + // result: (VMINPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMINPDMasked512) 
+ v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [6] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMinInt16x16 x y mask) + // result: (VPMINSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [6] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMinInt16x32 x y mask) + // result: (VPMINSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [6] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMinInt16x8 x y mask) + // result: (VPMINSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [6] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedMinInt32x16 x y mask) + // result: (VPMINSDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + 
v.reset(OpAMD64VPMINSDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [6] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedMinInt32x4 x y mask) + // result: (VPMINSDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedGreaterUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedGreaterUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [6] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedMinInt32x8 x y mask) + // result: (VPMINSDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(6) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [3] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMinInt64x2 x y mask) + // result: (VPMINSQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [3] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMinInt64x4 x y mask) + // result: (VPMINSQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + 
v.reset(OpAMD64VPMINSQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [3] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMinInt64x8 x y mask) + // result: (VPMINSQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [3] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMinInt8x16 x y mask) + // result: (VPMINSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true + v.reset(OpAMD64VPMINSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [3] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMinInt8x32 x y mask) + // result: (VPMINSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedIsNanFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedIsNanFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [3] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMinInt8x64 x y mask) + // result: (VPMINSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(3) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + 
v.reset(OpAMD64VPMINSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [2] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMinUint16x16 x y mask) + // result: (VPMINUWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [2] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMinUint16x32 x y mask) + // result: (VPMINUWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [2] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMinUint16x8 x y mask) + // result: (VPMINUWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [2] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMinUint32x16 x y mask) + // result: (VPMINUDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - 
v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [2] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMinUint32x4 x y mask) + // result: (VPMINUDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [2] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMinUint32x8 x y mask) + // result: (VPMINUDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [2] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedMinUint64x2 x y mask) + // result: (VPMINUQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [2] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedMinUint64x4 x y mask) + // result: (VPMINUQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, 
types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [2] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedMinUint64x8 x y mask) + // result: (VPMINUQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [2] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMinUint8x16 x y mask) + // result: (VPMINUBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [2] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMinUint8x32 x y mask) + // result: (VPMINUBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMinUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [2] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedMinUint8x64 x y mask) + // result: (VPMINUBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMINUBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [2] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMulByPowOf2Float32x16 x y mask) + // result: (VSCALEFPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [2] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMulByPowOf2Float32x4 x y mask) + // result: (VSCALEFPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [2] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMulByPowOf2Float32x8 x y mask) + // result: (VSCALEFPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [2] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedMulByPowOf2Float64x2 x y mask) + // result: (VSCALEFPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [2] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedMulByPowOf2Float64x4 x y mask) + // result: (VSCALEFPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [2] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedMulByPowOf2Float64x8 x y mask) + // result: (VSCALEFPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VSCALEFPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [2] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedMulEvenWidenInt64x2 x y mask) + // result: (VPMULDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [2] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedMulEvenWidenInt64x4 x y mask) + // result: (VPMULDQMasked256 x y (VPMOVVec64x4ToM mask)) 
for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [2] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedMulEvenWidenInt64x8 x y mask) + // result: (VPMULDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [2] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMulEvenWidenUint64x2 x y mask) + // result: (VPMULUDQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [2] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMulEvenWidenUint64x4 x y mask) + // result: (VPMULUDQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [2] y x (VPMOVVec32x8ToM mask))) + // 
match: (MaskedMulEvenWidenUint64x8 x y mask) + // result: (VPMULUDQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULUDQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [2] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedMulFloat32x16 x y mask) + // result: (VMULPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMULPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [2] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedMulFloat32x4 x y mask) + // result: (VMULPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMULPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [2] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedMulFloat32x8 x y mask) + // result: (VMULPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMULPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [2] 
y x (VPMOVVec8x16ToM mask))) + // match: (MaskedMulFloat64x2 x y mask) + // result: (VMULPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMULPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [2] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedMulFloat64x4 x y mask) + // result: (VMULPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMULPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessEqualUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [2] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedMulFloat64x8 x y mask) + // result: (VMULPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(2) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VMULPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulHighInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [1] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedMulHighInt16x16 x y mask) + // result: (VPMULHWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) + v.reset(OpAMD64VPMULHWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedLessFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulHighInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedLessFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 
(VCMPPSMasked128 [1] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedMulHighInt16x32 x y mask) + // result: (VPMULHWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true + v.reset(OpAMD64VPMULHWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true } } -func rewriteValueAMD64_OpMaskedLessFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedMulHighInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighInt16x8 x y mask) + // result: (VPMULHWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighUint16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighUint16x16 x y mask) + // result: (VPMULHUWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighUint16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighUint16x32 x y mask) + // result: (VPMULHUWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulHighUint16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulHighUint16x8 x y mask) + // result: (VPMULHUWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULHUWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt16x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt16x16 x y mask) + // result: (VPMULLWMasked256 x y (VPMOVVec16x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt16x32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt16x32 x y mask) + // result: (VPMULLWMasked512 x y (VPMOVVec16x32ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt16x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt16x8 x y mask) 
+ // result: (VPMULLWMasked128 x y (VPMOVVec16x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt32x16 x y mask) + // result: (VPMULLDMasked512 x y (VPMOVVec32x16ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt32x4 x y mask) + // result: (VPMULLDMasked128 x y (VPMOVVec32x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt32x8 x y mask) + // result: (VPMULLDMasked256 x y (VPMOVVec32x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt64x2 x y mask) + // result: (VPMULLQMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt64x4 x y mask) + // result: (VPMULLQMasked256 x y (VPMOVVec64x4ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedMulLowInt64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedMulLowInt64x8 x y mask) + // result: (VPMULLQMasked512 x y (VPMOVVec64x8ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMULLQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [1] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedNotEqualFloat32x16 x y mask) + // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [4] x y (VPMOVVec32x16ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat32x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualFloat32x4 x y mask) + // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [4] x y (VPMOVVec32x4ToM mask))) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v1.AddArg(mask) + v0.AddArg3(x, y, v1) + v.AddArg(v0) + return true + } +} +func rewriteValueAMD64_OpMaskedNotEqualFloat32x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (MaskedNotEqualFloat32x8 x y mask) + // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [4] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [1] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedNotEqualFloat64x2 x y mask) + // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [4] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [1] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedNotEqualFloat64x4 x y mask) + // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [4] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [1] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedNotEqualFloat64x8 x y mask) + // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [4] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := 
b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [1] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedNotEqualInt16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [4] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [1] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedNotEqualInt16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [4] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [1] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedNotEqualInt16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [4] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [1] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedNotEqualInt32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [4] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func 
rewriteValueAMD64_OpMaskedLessInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [1] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedNotEqualInt32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [4] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [1] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedNotEqualInt32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [4] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [1] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedNotEqualInt64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [4] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [1] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedNotEqualInt64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [4] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [1] y x (VPMOVVec64x8ToM 
mask))) + // match: (MaskedNotEqualInt64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [4] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [1] y x (VPMOVVec8x16ToM mask))) + // match: (MaskedNotEqualInt8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [4] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [1] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedNotEqualInt8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [4] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [1] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedNotEqualInt8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [4] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [1] y x (VPMOVVec16x16ToM mask))) + // match: (MaskedNotEqualUint16x16 x y mask) + // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] x y (VPMOVVec16x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = 
int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [1] y x (VPMOVVec16x32ToM mask))) + // match: (MaskedNotEqualUint16x32 x y mask) + // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] x y (VPMOVVec16x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [1] y x (VPMOVVec16x8ToM mask))) + // match: (MaskedNotEqualUint16x8 x y mask) + // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] x y (VPMOVVec16x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec16x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [1] y x (VPMOVVec32x16ToM mask))) + // match: (MaskedNotEqualUint32x16 x y mask) + // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] x y (VPMOVVec32x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [1] y x (VPMOVVec32x4ToM mask))) + // match: (MaskedNotEqualUint32x4 x y mask) + // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] x y (VPMOVVec32x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint32x8(v *Value) bool { +func 
rewriteValueAMD64_OpMaskedNotEqualUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [1] y x (VPMOVVec32x8ToM mask))) + // match: (MaskedNotEqualUint32x8 x y mask) + // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] x y (VPMOVVec32x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec32x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [1] y x (VPMOVVec64x2ToM mask))) + // match: (MaskedNotEqualUint64x2 x y mask) + // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] x y (VPMOVVec64x2ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x2) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [1] y x (VPMOVVec64x4ToM mask))) + // match: (MaskedNotEqualUint64x4 x y mask) + // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] x y (VPMOVVec64x4ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x4) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [1] y x (VPMOVVec64x8ToM mask))) + // match: (MaskedNotEqualUint64x8 x y mask) + // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] x y (VPMOVVec64x8ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec64x8) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [1] y x (VPMOVVec8x16ToM mask))) + // match: 
(MaskedNotEqualUint8x16 x y mask) + // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] x y (VPMOVVec8x16ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x16) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [1] y x (VPMOVVec8x32ToM mask))) + // match: (MaskedNotEqualUint8x32 x y mask) + // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] x y (VPMOVVec8x32ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x32) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedLessUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedNotEqualUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MaskedLessUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [1] y x (VPMOVVec8x64ToM mask))) + // match: (MaskedNotEqualUint8x64 x y mask) + // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] x y (VPMOVVec8x64ToM mask))) for { x := v_0 y := v_1 mask := v_2 v.reset(OpAMD64VPMOVMToVec8x64) v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(1) + v0.AuxInt = int8ToAuxInt(4) v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v1.AddArg(mask) - v0.AddArg3(y, x, v1) + v0.AddArg3(x, y, v1) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrFloat32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat32x16 x y mask) - // result: (VMAXPSMasked512 y x (VPMOVVec32x16ToM mask)) + // match: (MaskedOrFloat32x16 x y mask) + // result: (VORPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMAXPSMasked512) + v.reset(OpAMD64VORPSMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrFloat32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat32x4 x y mask) - // result: (VMAXPSMasked128 y x (VPMOVVec32x4ToM mask)) + // match: (MaskedOrFloat32x4 x y mask) + // result: (VORPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMAXPSMasked128) + v.reset(OpAMD64VORPSMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrFloat32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: 
(MaskedMaxFloat32x8 x y mask) - // result: (VMAXPSMasked256 y x (VPMOVVec32x8ToM mask)) + // match: (MaskedOrFloat32x8 x y mask) + // result: (VORPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMAXPSMasked256) + v.reset(OpAMD64VORPSMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrFloat64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat64x2 x y mask) - // result: (VMAXPDMasked128 y x (VPMOVVec64x2ToM mask)) + // match: (MaskedOrFloat64x2 x y mask) + // result: (VORPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMAXPDMasked128) + v.reset(OpAMD64VORPDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrFloat64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat64x4 x y mask) - // result: (VMAXPDMasked256 y x (VPMOVVec64x4ToM mask)) + // match: (MaskedOrFloat64x4 x y mask) + // result: (VORPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMAXPDMasked256) + v.reset(OpAMD64VORPDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrFloat64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxFloat64x8 x y mask) - // result: (VMAXPDMasked512 y x (VPMOVVec64x8ToM mask)) + // match: (MaskedOrFloat64x8 x y mask) + // result: (VORPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VMAXPDMasked512) + v.reset(OpAMD64VORPDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrInt32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt16x16 x y mask) - // result: (VPMAXSWMasked256 y x (VPMOVVec16x16ToM mask)) + // match: (MaskedOrInt32x16 x y mask) + // result: (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrInt32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt16x32 x y mask) - // result: (VPMAXSWMasked512 y x (VPMOVVec16x32ToM mask)) + // match: (MaskedOrInt32x4 x y mask) + // result: (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + 
v.reset(OpAMD64VPORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrInt32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt16x8 x y mask) - // result: (VPMAXSWMasked128 y x (VPMOVVec16x8ToM mask)) + // match: (MaskedOrInt32x8 x y mask) + // result: (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrInt64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt32x16 x y mask) - // result: (VPMAXSDMasked512 y x (VPMOVVec32x16ToM mask)) + // match: (MaskedOrInt64x2 x y mask) + // result: (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v.reset(OpAMD64VPORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrInt64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt32x4 x y mask) - // result: (VPMAXSDMasked128 y x (VPMOVVec32x4ToM mask)) + // match: (MaskedOrInt64x4 x y mask) + // result: (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v.reset(OpAMD64VPORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrInt64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt32x8 x y mask) - // result: (VPMAXSDMasked256 y x (VPMOVVec32x8ToM mask)) + // match: (MaskedOrInt64x8 x y mask) + // result: (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v.reset(OpAMD64VPORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrUint32x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt64x2 x y mask) - // result: (VPMAXSQMasked128 y x (VPMOVVec64x2ToM mask)) + // match: (MaskedOrUint32x16 x y mask) + // result: (VPORDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, 
types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrUint32x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt64x4 x y mask) - // result: (VPMAXSQMasked256 y x (VPMOVVec64x4ToM mask)) + // match: (MaskedOrUint32x4 x y mask) + // result: (VPORDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrUint32x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt64x8 x y mask) - // result: (VPMAXSQMasked512 y x (VPMOVVec64x8ToM mask)) + // match: (MaskedOrUint32x8 x y mask) + // result: (VPORDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrUint64x2(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt8x16 x y mask) - // result: (VPMAXSBMasked128 y x (VPMOVVec8x16ToM mask)) + // match: (MaskedOrUint64x2 x y mask) + // result: (VPORQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v.reset(OpAMD64VPORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrUint64x4(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt8x32 x y mask) - // result: (VPMAXSBMasked256 y x (VPMOVVec8x32ToM mask)) + // match: (MaskedOrUint64x4 x y mask) + // result: (VPORQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v.reset(OpAMD64VPORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedOrUint64x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxInt8x64 x y mask) - // result: (VPMAXSBMasked512 y x (VPMOVVec8x64ToM mask)) + // match: (MaskedOrUint64x8 x y mask) + // result: (VPORQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMAXSBMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v.reset(OpAMD64VPORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return 
true } } -func rewriteValueAMD64_OpMaskedMaxUint16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint16x16 x y mask) - // result: (VPMAXUWMasked256 y x (VPMOVVec16x16ToM mask)) + // match: (MaskedPopCountInt16x16 x mask) + // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUWMasked256) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint16x32 x y mask) - // result: (VPMAXUWMasked512 y x (VPMOVVec16x32ToM mask)) + // match: (MaskedPopCountInt16x32 x mask) + // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUWMasked512) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint16x8 x y mask) - // result: (VPMAXUWMasked128 y x (VPMOVVec16x8ToM mask)) + // match: (MaskedPopCountInt16x8 x mask) + // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUWMasked128) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint32x16 x y mask) - // result: (VPMAXUDMasked512 y x (VPMOVVec32x16ToM mask)) + // match: (MaskedPopCountInt32x16 x mask) + // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUDMasked512) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint32x4 x y mask) - // result: (VPMAXUDMasked128 y x (VPMOVVec32x4ToM mask)) + // match: (MaskedPopCountInt32x4 x mask) + // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUDMasked128) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint32x8 x y mask) - // 
result: (VPMAXUDMasked256 y x (VPMOVVec32x8ToM mask)) + // match: (MaskedPopCountInt32x8 x mask) + // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUDMasked256) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint64x2 x y mask) - // result: (VPMAXUQMasked128 y x (VPMOVVec64x2ToM mask)) + // match: (MaskedPopCountInt64x2 x mask) + // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUQMasked128) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint64x4 x y mask) - // result: (VPMAXUQMasked256 y x (VPMOVVec64x4ToM mask)) + // match: (MaskedPopCountInt64x4 x mask) + // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUQMasked256) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint64x8 x y mask) - // result: (VPMAXUQMasked512 y x (VPMOVVec64x8ToM mask)) + // match: (MaskedPopCountInt64x8 x mask) + // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUQMasked512) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint8x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint8x16 x y mask) - // result: (VPMAXUBMasked128 y x (VPMOVVec8x16ToM mask)) + // match: (MaskedPopCountInt8x16 x mask) + // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUBMasked128) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint8x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint8x32 x y mask) - // result: (VPMAXUBMasked256 y x (VPMOVVec8x32ToM mask)) + // match: (MaskedPopCountInt8x32 x mask) + // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUBMasked256) + mask := v_1 + 
v.reset(OpAMD64VPOPCNTBMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMaxUint8x64(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMaxUint8x64 x y mask) - // result: (VPMAXUBMasked512 y x (VPMOVVec8x64ToM mask)) + // match: (MaskedPopCountInt8x64 x mask) + // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMAXUBMasked512) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinFloat32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinFloat32x16 x y mask) - // result: (VMINPSMasked512 y x (VPMOVVec32x16ToM mask)) + // match: (MaskedPopCountUint16x16 x mask) + // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinFloat32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinFloat32x4 x y mask) - // result: (VMINPSMasked128 y x (VPMOVVec32x4ToM mask)) + // match: (MaskedPopCountUint16x32 x mask) + // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinFloat32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinFloat32x8 x y mask) - // result: (VMINPSMasked256 y x (VPMOVVec32x8ToM mask)) + // match: (MaskedPopCountUint16x8 x mask) + // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinFloat64x2(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinFloat64x2 x y mask) - // result: (VMINPDMasked128 y x (VPMOVVec64x2ToM mask)) + // match: (MaskedPopCountUint32x16 x mask) + // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, 
types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinFloat64x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinFloat64x4 x y mask) - // result: (VMINPDMasked256 y x (VPMOVVec64x4ToM mask)) + // match: (MaskedPopCountUint32x4 x mask) + // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinFloat64x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinFloat64x8 x y mask) - // result: (VMINPDMasked512 y x (VPMOVVec64x8ToM mask)) + // match: (MaskedPopCountUint32x8 x mask) + // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMINPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt16x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt16x16 x y mask) - // result: (VPMINSWMasked256 y x (VPMOVVec16x16ToM mask)) + // match: (MaskedPopCountUint64x2 x mask) + // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt16x32(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt16x32 x y mask) - // result: (VPMINSWMasked512 y x (VPMOVVec16x32ToM mask)) + // match: (MaskedPopCountUint64x4 x mask) + // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt16x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt16x8 x y mask) - // result: (VPMINSWMasked128 y x (VPMOVVec16x8ToM mask)) + // match: (MaskedPopCountUint64x8 x mask) + // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - 
v.reset(OpAMD64VPMINSWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt32x16(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt32x16 x y mask) - // result: (VPMINSDMasked512 y x (VPMOVVec32x16ToM mask)) + // match: (MaskedPopCountUint8x16 x mask) + // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt32x4(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt32x4 x y mask) - // result: (VPMINSDMasked128 y x (VPMOVVec32x4ToM mask)) + // match: (MaskedPopCountUint8x32 x mask) + // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt32x8(v *Value) bool { - v_2 := v.Args[2] +func rewriteValueAMD64_OpMaskedPopCountUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt32x8 x y mask) - // result: (VPMINSDMasked256 y x (VPMOVVec32x8ToM mask)) + // match: (MaskedPopCountUint8x64 x mask) + // result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM mask)) for { x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINSDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + mask := v_1 + v.reset(OpAMD64VPOPCNTBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt64x2 x y mask) - // result: (VPMINSQMasked128 y x (VPMOVVec64x2ToM mask)) + // match: (MaskedSaturatedAddInt16x16 x y mask) + // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v.reset(OpAMD64VPADDSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt64x4 x y mask) - // result: (VPMINSQMasked256 y x (VPMOVVec64x4ToM mask)) + // match: (MaskedSaturatedAddInt16x32 x y mask) + // result: 
(VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v.reset(OpAMD64VPADDSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt64x8 x y mask) - // result: (VPMINSQMasked512 y x (VPMOVVec64x8ToM mask)) + // match: (MaskedSaturatedAddInt16x8 x y mask) + // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v.reset(OpAMD64VPADDSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt8x16 x y mask) - // result: (VPMINSBMasked128 y x (VPMOVVec8x16ToM mask)) + // match: (MaskedSaturatedAddInt8x16 x y mask) + // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSBMasked128) + v.reset(OpAMD64VPADDSBMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt8x32 x y mask) - // result: (VPMINSBMasked256 y x (VPMOVVec8x32ToM mask)) + // match: (MaskedSaturatedAddInt8x32 x y mask) + // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSBMasked256) + v.reset(OpAMD64VPADDSBMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinInt8x64 x y mask) - // result: (VPMINSBMasked512 y x (VPMOVVec8x64ToM mask)) + // match: (MaskedSaturatedAddInt8x64 x y mask) + // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINSBMasked512) + v.reset(OpAMD64VPADDSBMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint16x16 x y mask) - // result: (VPMINUWMasked256 y x (VPMOVVec16x16ToM mask)) + // match: (MaskedSaturatedAddUint16x16 x y mask) + // result: (VPADDSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUWMasked256) + v.reset(OpAMD64VPADDSWMasked256) v0 := 
b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint16x32 x y mask) - // result: (VPMINUWMasked512 y x (VPMOVVec16x32ToM mask)) + // match: (MaskedSaturatedAddUint16x32 x y mask) + // result: (VPADDSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUWMasked512) + v.reset(OpAMD64VPADDSWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddUint16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint16x8 x y mask) - // result: (VPMINUWMasked128 y x (VPMOVVec16x8ToM mask)) + // match: (MaskedSaturatedAddUint16x8 x y mask) + // result: (VPADDSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUWMasked128) + v.reset(OpAMD64VPADDSWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinUint32x16 x y mask) - // result: (VPMINUDMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinUint32x4 x y mask) - // result: (VPMINUDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinUint32x8 x y mask) - // result: (VPMINUDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinUint64x2 x y mask) - // result: (VPMINUQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinUint64x4 x y mask) - // result: (VPMINUQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMinUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMinUint64x8 x y mask) - // result: (VPMINUQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMINUQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint8x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint8x16 x y mask) - // result: (VPMINUBMasked128 y x (VPMOVVec8x16ToM mask)) + // match: (MaskedSaturatedAddUint8x16 x y mask) + // result: (VPADDSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUBMasked128) + v.reset(OpAMD64VPADDSBMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint8x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint8x32 x y mask) - // result: (VPMINUBMasked256 y x (VPMOVVec8x32ToM mask)) + // match: (MaskedSaturatedAddUint8x32 x y mask) + // result: (VPADDSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUBMasked256) + v.reset(OpAMD64VPADDSBMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMinUint8x64(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMinUint8x64 x y mask) - // result: (VPMINUBMasked512 y x (VPMOVVec8x64ToM mask)) + // match: (MaskedSaturatedAddUint8x64 x y mask) + // result: (VPADDSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMINUBMasked512) + v.reset(OpAMD64VPADDSBMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulByPowOf2Float32x16 x y mask) - // result: (VSCALEFPSMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulByPowOf2Float32x4 x y mask) - // result: (VSCALEFPSMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulByPowOf2Float32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedMulByPowOf2Float32x8 x y mask) - // result: (VSCALEFPSMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulByPowOf2Float64x2 x y mask) - // result: (VSCALEFPDMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulByPowOf2Float64x4 x y mask) - // result: (VSCALEFPDMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulByPowOf2Float64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulByPowOf2Float64x8 x y mask) - // result: (VSCALEFPDMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VSCALEFPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulEvenWidenInt64x2 x y mask) - // result: (VPMULDQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulEvenWidenInt64x4 x y mask) - // result: (VPMULDQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulEvenWidenInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulEvenWidenInt64x8 x y mask) - // result: (VPMULDQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulEvenWidenUint64x2 x y mask) - // result: (VPMULUDQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULUDQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedMulEvenWidenUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulEvenWidenUint64x4 x y mask) - // result: (VPMULUDQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULUDQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulEvenWidenUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulEvenWidenUint64x8 x y mask) - // result: (VPMULUDQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULUDQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulFloat32x16 x y mask) - // result: (VMULPSMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulFloat32x4 x y mask) - // result: (VMULPSMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulFloat32x8 x y mask) - // result: (VMULPSMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulFloat64x2 x y mask) - // result: (VMULPDMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulFloat64x4 x y mask) - // result: (VMULPDMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulFloat64x8 x y mask) - // result: (VMULPDMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VMULPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, 
v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighInt16x16 x y mask) - // result: (VPMULHWMasked256 y x (VPMOVVec16x16ToM mask)) + // match: (MaskedSaturatedSubInt16x16 x y mask) + // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHWMasked256) + v.reset(OpAMD64VPSUBSWMasked256) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighInt16x32 x y mask) - // result: (VPMULHWMasked512 y x (VPMOVVec16x32ToM mask)) + // match: (MaskedSaturatedSubInt16x32 x y mask) + // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHWMasked512) + v.reset(OpAMD64VPSUBSWMasked512) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubInt16x8(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighInt16x8 x y mask) - // result: (VPMULHWMasked128 y x (VPMOVVec16x8ToM mask)) + // match: (MaskedSaturatedSubInt16x8 x y mask) + // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHWMasked128) + v.reset(OpAMD64VPSUBSWMasked128) v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x16(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighUint16x16 x y mask) - // result: (VPMULHUWMasked256 y x (VPMOVVec16x16ToM mask)) + // match: (MaskedSaturatedSubInt8x16 x y mask) + // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHUWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubInt8x32(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighUint16x32 x y mask) - // result: (VPMULHUWMasked512 y x (VPMOVVec16x32ToM mask)) + // match: (MaskedSaturatedSubInt8x32 x y mask) + // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHUWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMaskedMulHighUint16x8(v *Value) bool { +func 
rewriteValueAMD64_OpMaskedSaturatedSubInt8x64(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (MaskedMulHighUint16x8 x y mask) - // result: (VPMULHUWMasked128 y x (VPMOVVec16x8ToM mask)) + // match: (MaskedSaturatedSubInt8x64 x y mask) + // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 mask := v_2 - v.reset(OpAMD64VPMULHUWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt16x16 x y mask) - // result: (VPMULLWMasked256 y x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt16x32 x y mask) - // result: (VPMULLWMasked512 y x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt16x8 x y mask) - // result: (VPMULLWMasked128 y x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt32x16 x y mask) - // result: (VPMULLDMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt32x4 x y mask) - // result: (VPMULLDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt32x8 x y mask) - // result: (VPMULLDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt64x2 x y mask) - // result: (VPMULLQMasked128 y x (VPMOVVec64x2ToM mask)) - 
for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt64x4 x y mask) - // result: (VPMULLQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedMulLowInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedMulLowInt64x8 x y mask) - // result: (VPMULLQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMULLQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat32x16 x y mask) - // result: (VPMOVMToVec32x16 (VCMPPSMasked512 [4] y x (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat32x4 x y mask) - // result: (VPMOVMToVec32x4 (VCMPPSMasked128 [4] y x (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat32x8 x y mask) - // result: (VPMOVMToVec32x8 (VCMPPSMasked256 [4] y x (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPSMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat64x2 x y mask) - // result: (VPMOVMToVec64x2 (VCMPPDMasked128 [4] y x (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func 
rewriteValueAMD64_OpMaskedNotEqualFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat64x4 x y mask) - // result: (VPMOVMToVec64x4 (VCMPPDMasked256 [4] y x (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualFloat64x8 x y mask) - // result: (VPMOVMToVec64x8 (VCMPPDMasked512 [4] y x (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPWMasked256 [4] y x (VPMOVVec16x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPWMasked512 [4] y x (VPMOVVec16x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPWMasked128 [4] y x (VPMOVVec16x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPDMasked512 [4] y x (VPMOVVec32x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, 
OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPDMasked128 [4] y x (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPDMasked256 [4] y x (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPQMasked128 [4] y x (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPQMasked256 [4] y x (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPQMasked512 [4] y x (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPBMasked128 [4] y x (VPMOVVec8x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, 
OpAMD64VPCMPBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPBMasked256 [4] y x (VPMOVVec8x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualInt8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualInt8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPBMasked512 [4] y x (VPMOVVec8x64ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint16x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint16x16 x y mask) - // result: (VPMOVMToVec16x16 (VPCMPUWMasked256 [4] y x (VPMOVVec16x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint16x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint16x32 x y mask) - // result: (VPMOVMToVec16x32 (VPCMPUWMasked512 [4] y x (VPMOVVec16x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint16x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint16x8 x y mask) - // result: (VPMOVMToVec16x8 (VPCMPUWMasked128 [4] y x (VPMOVVec16x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUWMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint32x16 x y mask) - // result: (VPMOVMToVec32x16 (VPCMPUDMasked512 [4] y x (VPMOVVec32x16ToM 
mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint32x4 x y mask) - // result: (VPMOVMToVec32x4 (VPCMPUDMasked128 [4] y x (VPMOVVec32x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint32x8 x y mask) - // result: (VPMOVMToVec32x8 (VPCMPUDMasked256 [4] y x (VPMOVVec32x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUDMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint64x2 x y mask) - // result: (VPMOVMToVec64x2 (VPCMPUQMasked128 [4] y x (VPMOVVec64x2ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint64x4 x y mask) - // result: (VPMOVMToVec64x4 (VPCMPUQMasked256 [4] y x (VPMOVVec64x4ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint64x8 x y mask) - // result: (VPMOVMToVec64x8 (VPCMPUQMasked512 [4] y x (VPMOVVec64x8ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint8x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: 
(MaskedNotEqualUint8x16 x y mask) - // result: (VPMOVMToVec8x16 (VPCMPUBMasked128 [4] y x (VPMOVVec8x16ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint8x32(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint8x32 x y mask) - // result: (VPMOVMToVec8x32 (VPCMPUBMasked256 [4] y x (VPMOVVec8x32ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedNotEqualUint8x64(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (MaskedNotEqualUint8x64 x y mask) - // result: (VPMOVMToVec8x64 (VPCMPUBMasked512 [4] y x (VPMOVVec8x64ToM mask))) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUBMasked512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v1 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) - v1.AddArg(mask) - v0.AddArg3(y, x, v1) - v.AddArg(v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat32x16 x y mask) - // result: (VORPSMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPSMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat32x4 x y mask) - // result: (VORPSMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPSMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat32x8 x y mask) - // result: (VORPSMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPSMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat64x2 x y mask) - // result: (VORPDMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: 
(MaskedOrFloat64x4 x y mask) - // result: (VORPDMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrFloat64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrFloat64x8 x y mask) - // result: (VORPDMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VORPDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrInt32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrInt32x16 x y mask) - // result: (VPORDMasked512 y x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrInt32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrInt32x4 x y mask) - // result: (VPORDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrInt32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrInt32x8 x y mask) - // result: (VPORDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrInt64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrInt64x2 x y mask) - // result: (VPORQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrInt64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrInt64x4 x y mask) - // result: (VPORQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrInt64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrInt64x8 x y mask) - // result: (VPORQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrUint32x16(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrUint32x16 x y mask) - // result: (VPORDMasked512 y x (VPMOVVec32x16ToM mask)) 
- for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrUint32x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrUint32x4 x y mask) - // result: (VPORDMasked128 y x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrUint32x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrUint32x8 x y mask) - // result: (VPORDMasked256 y x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrUint64x2(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrUint64x2 x y mask) - // result: (VPORQMasked128 y x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrUint64x4(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrUint64x4 x y mask) - // result: (VPORQMasked256 y x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedOrUint64x8(v *Value) bool { - v_2 := v.Args[2] - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedOrUint64x8 x y mask) - // result: (VPORQMasked512 y x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - y := v_1 - mask := v_2 - v.reset(OpAMD64VPORQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg3(y, x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt16x16 x mask) - // result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt16x32 x mask) - // result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt16x8 x mask) - // result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTWMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, 
types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt32x16 x mask) - // result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt32x4 x mask) - // result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt32x8 x mask) - // result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTDMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt64x2 x mask) - // result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt64x4 x mask) - // result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt64x8 x mask) - // result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTQMasked512) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt8x16 x mask) - // result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked128) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt8x32 x mask) - // result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM mask)) - for { - x := v_0 - mask := v_1 - v.reset(OpAMD64VPOPCNTBMasked256) - v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) - v0.AddArg(mask) - v.AddArg2(x, v0) - return true - } -} -func rewriteValueAMD64_OpMaskedPopCountInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (MaskedPopCountInt8x64 x mask) - // 
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VPOPCNTBMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedPopCountUint16x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedPopCountUint16x16 x mask)
-	// result: (VPOPCNTWMasked256 x (VPMOVVec16x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VPOPCNTWMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedPopCountUint16x32(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedPopCountUint16x32 x mask)
-	// result: (VPOPCNTWMasked512 x (VPMOVVec16x32ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VPOPCNTWMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedPopCountUint16x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedPopCountUint16x8 x mask)
-	// result: (VPOPCNTWMasked128 x (VPMOVVec16x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VPOPCNTWMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedPopCountUint32x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedPopCountUint32x16 x mask)
-	// result: (VPOPCNTDMasked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VPOPCNTDMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedPopCountUint32x4(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedPopCountUint32x4 x mask)
-	// result: (VPOPCNTDMasked128 x (VPMOVVec32x4ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VPOPCNTDMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedPopCountUint32x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedPopCountUint32x8 x mask)
-	// result: (VPOPCNTDMasked256 x (VPMOVVec32x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VPOPCNTDMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedPopCountUint64x2(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedPopCountUint64x2 x mask)
-	// result: (VPOPCNTQMasked128 x (VPMOVVec64x2ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VPOPCNTQMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedPopCountUint64x4(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedPopCountUint64x4 x mask)
-	// result: (VPOPCNTQMasked256 x (VPMOVVec64x4ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VPOPCNTQMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedPopCountUint64x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedPopCountUint64x8 x mask)
-	// result: (VPOPCNTQMasked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VPOPCNTQMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedPopCountUint8x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedPopCountUint8x16 x mask)
-	// result: (VPOPCNTBMasked128 x (VPMOVVec8x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VPOPCNTBMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedPopCountUint8x32(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedPopCountUint8x32 x mask)
-	// result: (VPOPCNTBMasked256 x (VPMOVVec8x32ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VPOPCNTBMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedPopCountUint8x64(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedPopCountUint8x64 x mask)
-	// result: (VPOPCNTBMasked512 x (VPMOVVec8x64ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VPOPCNTBMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedAddInt16x16(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedAddInt16x16 x y mask)
-	// result: (VPADDSWMasked256 y x (VPMOVVec16x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPADDSWMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedAddInt16x32(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedAddInt16x32 x y mask)
-	// result: (VPADDSWMasked512 y x (VPMOVVec16x32ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPADDSWMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedAddInt16x8(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedAddInt16x8 x y mask)
-	// result: (VPADDSWMasked128 y x (VPMOVVec16x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPADDSWMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedAddInt8x16(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedAddInt8x16 x y mask)
-	// result: (VPADDSBMasked128 y x (VPMOVVec8x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPADDSBMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedAddInt8x32(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedAddInt8x32 x y mask)
-	// result: (VPADDSBMasked256 y x (VPMOVVec8x32ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPADDSBMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedAddInt8x64(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedAddInt8x64 x y mask)
-	// result: (VPADDSBMasked512 y x (VPMOVVec8x64ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPADDSBMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedAddUint16x16(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedAddUint16x16 x y mask)
-	// result: (VPADDSWMasked256 y x (VPMOVVec16x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPADDSWMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedAddUint16x32(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedAddUint16x32 x y mask)
-	// result: (VPADDSWMasked512 y x (VPMOVVec16x32ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPADDSWMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedAddUint16x8(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedAddUint16x8 x y mask)
-	// result: (VPADDSWMasked128 y x (VPMOVVec16x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPADDSWMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedAddUint8x16(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedAddUint8x16 x y mask)
-	// result: (VPADDSBMasked128 y x (VPMOVVec8x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPADDSBMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedAddUint8x32(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedAddUint8x32 x y mask)
-	// result: (VPADDSBMasked256 y x (VPMOVVec8x32ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPADDSBMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedAddUint8x64(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedAddUint8x64 x y mask)
-	// result: (VPADDSBMasked512 y x (VPMOVVec8x64ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPADDSBMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedSubInt16x16(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedSubInt16x16 x y mask)
-	// result: (VPSUBSWMasked256 y x (VPMOVVec16x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBSWMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedSubInt16x32(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedSubInt16x32 x y mask)
-	// result: (VPSUBSWMasked512 y x (VPMOVVec16x32ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBSWMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedSubInt16x8(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedSubInt16x8 x y mask)
-	// result: (VPSUBSWMasked128 y x (VPMOVVec16x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBSWMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedSubInt8x16(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedSubInt8x16 x y mask)
-	// result: (VPSUBSBMasked128 y x (VPMOVVec8x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBSBMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedSubInt8x32(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedSubInt8x32 x y mask)
-	// result: (VPSUBSBMasked256 y x (VPMOVVec8x32ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBSBMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedSubInt8x64(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedSubInt8x64 x y mask)
-	// result: (VPSUBSBMasked512 y x (VPMOVVec8x64ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBSBMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedSubUint16x16(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedSubUint16x16 x y mask)
-	// result: (VPSUBSWMasked256 y x (VPMOVVec16x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBSWMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedSubUint16x32(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedSubUint16x32 x y mask)
-	// result: (VPSUBSWMasked512 y x (VPMOVVec16x32ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBSWMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedSubUint16x8(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedSubUint16x8 x y mask)
-	// result: (VPSUBSWMasked128 y x (VPMOVVec16x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBSWMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedSubUint8x16(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedSubUint8x16 x y mask)
-	// result: (VPSUBSBMasked128 y x (VPMOVVec8x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBSBMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedSubUint8x32 x y mask)
-	// result: (VPSUBSBMasked256 y x (VPMOVVec8x32ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBSBMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSaturatedSubUint8x64 x y mask)
-	// result: (VPSUBSBMasked512 y x (VPMOVVec8x64ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBSBMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSqrtFloat32x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSqrtFloat32x16 x mask)
-	// result: (VSQRTPSMasked512 x (VPMOVVec32x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VSQRTPSMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSqrtFloat32x4(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSqrtFloat32x4 x mask)
-	// result: (VSQRTPSMasked128 x (VPMOVVec32x4ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VSQRTPSMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSqrtFloat32x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSqrtFloat32x8 x mask)
-	// result: (VSQRTPSMasked256 x (VPMOVVec32x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VSQRTPSMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSqrtFloat64x2(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSqrtFloat64x2 x mask)
-	// result: (VSQRTPDMasked128 x (VPMOVVec64x2ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VSQRTPDMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSqrtFloat64x4(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSqrtFloat64x4 x mask)
-	// result: (VSQRTPDMasked256 x (VPMOVVec64x4ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VSQRTPDMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSqrtFloat64x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSqrtFloat64x8 x mask)
-	// result: (VSQRTPDMasked512 x (VPMOVVec64x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		mask := v_1
-		v.reset(OpAMD64VSQRTPDMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg2(x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubFloat32x16(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubFloat32x16 x y mask)
-	// result: (VADDPSMasked512 y x (VPMOVVec32x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VADDPSMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubFloat32x4(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubFloat32x4 x y mask)
-	// result: (VADDPSMasked128 y x (VPMOVVec32x4ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VADDPSMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubFloat32x8(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubFloat32x8 x y mask)
-	// result: (VADDPSMasked256 y x (VPMOVVec32x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VADDPSMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubFloat64x2(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubFloat64x2 x y mask)
-	// result: (VADDPDMasked128 y x (VPMOVVec64x2ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VADDPDMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubFloat64x4(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubFloat64x4 x y mask)
-	// result: (VADDPDMasked256 y x (VPMOVVec64x4ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VADDPDMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubFloat64x8(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubFloat64x8 x y mask)
-	// result: (VADDPDMasked512 y x (VPMOVVec64x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VADDPDMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubInt16x16(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubInt16x16 x y mask)
-	// result: (VPSUBWMasked256 y x (VPMOVVec16x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBWMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubInt16x32(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubInt16x32 x y mask)
-	// result: (VPSUBWMasked512 y x (VPMOVVec16x32ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBWMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubInt16x8(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubInt16x8 x y mask)
-	// result: (VPSUBWMasked128 y x (VPMOVVec16x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBWMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubInt32x16(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubInt32x16 x y mask)
-	// result: (VPSUBDMasked512 y x (VPMOVVec32x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBDMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubInt32x4(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubInt32x4 x y mask)
-	// result: (VPSUBDMasked128 y x (VPMOVVec32x4ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBDMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubInt32x8(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubInt32x8 x y mask)
-	// result: (VPSUBDMasked256 y x (VPMOVVec32x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBDMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubInt64x2(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubInt64x2 x y mask)
-	// result: (VPSUBQMasked128 y x (VPMOVVec64x2ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBQMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubInt64x4(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubInt64x4 x y mask)
-	// result: (VPSUBQMasked256 y x (VPMOVVec64x4ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBQMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubInt64x8(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubInt64x8 x y mask)
-	// result: (VPSUBQMasked512 y x (VPMOVVec64x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBQMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubInt8x16(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubInt8x16 x y mask)
-	// result: (VPSUBBMasked128 y x (VPMOVVec8x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBBMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubInt8x32(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubInt8x32 x y mask)
-	// result: (VPSUBBMasked256 y x (VPMOVVec8x32ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBBMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubInt8x64(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubInt8x64 x y mask)
-	// result: (VPSUBBMasked512 y x (VPMOVVec8x64ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBBMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubUint16x16(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubUint16x16 x y mask)
-	// result: (VPSUBWMasked256 y x (VPMOVVec16x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBWMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubUint16x32(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubUint16x32 x y mask)
-	// result: (VPSUBWMasked512 y x (VPMOVVec16x32ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBWMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubUint16x8(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubUint16x8 x y mask)
-	// result: (VPSUBWMasked128 y x (VPMOVVec16x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBWMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubUint32x16(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubUint32x16 x y mask)
-	// result: (VPSUBDMasked512 y x (VPMOVVec32x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBDMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubUint32x4(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubUint32x4 x y mask)
-	// result: (VPSUBDMasked128 y x (VPMOVVec32x4ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBDMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubUint32x8(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubUint32x8 x y mask)
-	// result: (VPSUBDMasked256 y x (VPMOVVec32x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBDMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubUint64x2(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubUint64x2 x y mask)
-	// result: (VPSUBQMasked128 y x (VPMOVVec64x2ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBQMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubUint64x4(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubUint64x4 x y mask)
-	// result: (VPSUBQMasked256 y x (VPMOVVec64x4ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBQMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubUint64x8(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubUint64x8 x y mask)
-	// result: (VPSUBQMasked512 y x (VPMOVVec64x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBQMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubUint8x16(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubUint8x16 x y mask)
-	// result: (VPSUBBMasked128 y x (VPMOVVec8x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBBMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubUint8x32(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubUint8x32 x y mask)
-	// result: (VPSUBBMasked256 y x (VPMOVVec8x32ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBBMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedSubUint8x64(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedSubUint8x64 x y mask)
-	// result: (VPSUBBMasked512 y x (VPMOVVec8x64ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPSUBBMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedXorFloat32x16(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedXorFloat32x16 x y mask)
-	// result: (VXORPSMasked512 y x (VPMOVVec32x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VXORPSMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedXorFloat32x4(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedXorFloat32x4 x y mask)
-	// result: (VXORPSMasked128 y x (VPMOVVec32x4ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VXORPSMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedXorFloat32x8(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedXorFloat32x8 x y mask)
-	// result: (VXORPSMasked256 y x (VPMOVVec32x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VXORPSMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedXorFloat64x2(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedXorFloat64x2 x y mask)
-	// result: (VXORPDMasked128 y x (VPMOVVec64x2ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VXORPDMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedXorFloat64x4(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedXorFloat64x4 x y mask)
-	// result: (VXORPDMasked256 y x (VPMOVVec64x4ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VXORPDMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedXorFloat64x8(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedXorFloat64x8 x y mask)
-	// result: (VXORPDMasked512 y x (VPMOVVec64x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VXORPDMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedXorInt32x16(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedXorInt32x16 x y mask)
-	// result: (VPXORDMasked512 y x (VPMOVVec32x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPXORDMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedXorInt32x4(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedXorInt32x4 x y mask)
-	// result: (VPXORDMasked128 y x (VPMOVVec32x4ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPXORDMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedXorInt32x8(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedXorInt32x8 x y mask)
-	// result: (VPXORDMasked256 y x (VPMOVVec32x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPXORDMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedXorInt64x2(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedXorInt64x2 x y mask)
-	// result: (VPXORQMasked128 y x (VPMOVVec64x2ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPXORQMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedXorInt64x4(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedXorInt64x4 x y mask)
-	// result: (VPXORQMasked256 y x (VPMOVVec64x4ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPXORQMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedXorInt64x8(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedXorInt64x8 x y mask)
-	// result: (VPXORQMasked512 y x (VPMOVVec64x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPXORQMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedXorUint32x16(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedXorUint32x16 x y mask)
-	// result: (VPXORDMasked512 y x (VPMOVVec32x16ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPXORDMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedXorUint32x4(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedXorUint32x4 x y mask)
-	// result: (VPXORDMasked128 y x (VPMOVVec32x4ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPXORDMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedXorUint32x8(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedXorUint32x8 x y mask)
-	// result: (VPXORDMasked256 y x (VPMOVVec32x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPXORDMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedXorUint64x2(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedXorUint64x2 x y mask)
-	// result: (VPXORQMasked128 y x (VPMOVVec64x2ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPXORQMasked128)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedXorUint64x4(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedXorUint64x4 x y mask)
-	// result: (VPXORQMasked256 y x (VPMOVVec64x4ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPXORQMasked256)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaskedXorUint64x8(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (MaskedXorUint64x8 x y mask)
-	// result: (VPXORQMasked512 y x (VPMOVVec64x8ToM <types.TypeMask> mask))
-	for {
-		x := v_0
-		y := v_1
-		mask := v_2
-		v.reset(OpAMD64VPXORQMasked512)
-		v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask)
-		v0.AddArg(mask)
-		v.AddArg3(y, x, v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMax32F(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (Max32F <t> x y)
-	// result: (Neg32F <t> (Min32F <t> (Neg32F <t> x) (Neg32F <t> y)))
-	for {
-		t := v.Type
-		x := v_0
-		y := v_1
-		v.reset(OpNeg32F)
-		v.Type = t
-		v0 := b.NewValue0(v.Pos, OpMin32F, t)
-		v1 := b.NewValue0(v.Pos, OpNeg32F, t)
-		v1.AddArg(x)
-		v2 := b.NewValue0(v.Pos, OpNeg32F, t)
-		v2.AddArg(y)
-		v0.AddArg2(v1, v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMax64F(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (Max64F <t> x y)
-	// result: (Neg64F <t> (Min64F <t> (Neg64F <t> x) (Neg64F <t> y)))
-	for {
-		t := v.Type
-		x := v_0
-		y := v_1
-		v.reset(OpNeg64F)
-		v.Type = t
-		v0 := b.NewValue0(v.Pos, OpMin64F, t)
-		v1 := b.NewValue0(v.Pos, OpNeg64F, t)
-		v1.AddArg(x)
-		v2 := b.NewValue0(v.Pos, OpNeg64F, t)
-		v2.AddArg(y)
-		v0.AddArg2(v1, v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxFloat32x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxFloat32x16 x y)
-	// result: (VMAXPS512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VMAXPS512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxFloat32x4(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxFloat32x4 x y)
-	// result: (VMAXPS128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VMAXPS128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxFloat32x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxFloat32x8 x y)
-	// result: (VMAXPS256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VMAXPS256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxFloat64x2(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxFloat64x2 x y)
-	// result: (VMAXPD128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VMAXPD128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxFloat64x4(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxFloat64x4 x y)
-	// result: (VMAXPD256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VMAXPD256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxFloat64x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxFloat64x8 x y)
-	// result: (VMAXPD512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VMAXPD512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxInt16x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxInt16x16 x y)
-	// result: (VPMAXSW256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXSW256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxInt16x32(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxInt16x32 x y)
-	// result: (VPMAXSW512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXSW512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxInt16x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxInt16x8 x y)
-	// result: (VPMAXSW128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXSW128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxInt32x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxInt32x16 x y)
-	// result: (VPMAXSD512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXSD512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxInt32x4(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxInt32x4 x y)
-	// result: (VPMAXSD128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXSD128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxInt32x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxInt32x8 x y)
-	// result: (VPMAXSD256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXSD256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxInt64x2(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxInt64x2 x y)
-	// result: (VPMAXSQ128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXSQ128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxInt64x4(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxInt64x4 x y)
-	// result: (VPMAXSQ256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXSQ256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxInt64x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxInt64x8 x y)
-	// result: (VPMAXSQ512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXSQ512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxInt8x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxInt8x16 x y)
-	// result: (VPMAXSB128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXSB128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxInt8x32(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxInt8x32 x y)
-	// result: (VPMAXSB256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXSB256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxInt8x64(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxInt8x64 x y)
-	// result: (VPMAXSB512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXSB512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxUint16x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxUint16x16 x y)
-	// result: (VPMAXUW256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXUW256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxUint16x32(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxUint16x32 x y)
-	// result: (VPMAXUW512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXUW512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxUint16x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxUint16x8 x y)
-	// result: (VPMAXUW128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXUW128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxUint32x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxUint32x16 x y)
-	// result: (VPMAXUD512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXUD512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxUint32x4(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxUint32x4 x y)
-	// result: (VPMAXUD128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXUD128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxUint32x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxUint32x8 x y)
-	// result: (VPMAXUD256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXUD256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxUint64x2(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxUint64x2 x y)
-	// result: (VPMAXUQ128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXUQ128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxUint64x4(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxUint64x4 x y)
-	// result: (VPMAXUQ256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXUQ256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxUint64x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxUint64x8 x y)
-	// result: (VPMAXUQ512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXUQ512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxUint8x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxUint8x16 x y)
-	// result: (VPMAXUB128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXUB128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxUint8x32(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxUint8x32 x y)
-	// result: (VPMAXUB256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXUB256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMaxUint8x64(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MaxUint8x64 x y)
-	// result: (VPMAXUB512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMAXUB512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMin32F(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (Min32F <t> x y)
-	// result: (POR (MINSS <t> (MINSS <t> x y) x) (MINSS <t> x y))
-	for {
-		t := v.Type
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64POR)
-		v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
-		v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
-		v1.AddArg2(x, y)
-		v0.AddArg2(v1, x)
-		v.AddArg2(v0, v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMin64F(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	// match: (Min64F <t> x y)
-	// result: (POR (MINSD <t> (MINSD <t> x y) x) (MINSD <t> x y))
-	for {
-		t := v.Type
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64POR)
-		v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
-		v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
-		v1.AddArg2(x, y)
-		v0.AddArg2(v1, x)
-		v.AddArg2(v0, v1)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinFloat32x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinFloat32x16 x y)
-	// result: (VMINPS512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VMINPS512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinFloat32x4(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinFloat32x4 x y)
-	// result: (VMINPS128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VMINPS128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinFloat32x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinFloat32x8 x y)
-	// result: (VMINPS256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VMINPS256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinFloat64x2(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinFloat64x2 x y)
-	// result: (VMINPD128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VMINPD128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinFloat64x4(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinFloat64x4 x y)
-	// result: (VMINPD256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VMINPD256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinFloat64x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinFloat64x8 x y)
-	// result: (VMINPD512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VMINPD512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinInt16x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinInt16x16 x y)
-	// result: (VPMINSW256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINSW256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinInt16x32(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinInt16x32 x y)
-	// result: (VPMINSW512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINSW512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinInt16x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinInt16x8 x y)
-	// result: (VPMINSW128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINSW128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinInt32x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinInt32x16 x y)
-	// result: (VPMINSD512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINSD512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinInt32x4(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinInt32x4 x y)
-	// result: (VPMINSD128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINSD128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinInt32x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinInt32x8 x y)
-	// result: (VPMINSD256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINSD256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinInt64x2(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinInt64x2 x y)
-	// result: (VPMINSQ128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINSQ128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinInt64x4(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinInt64x4 x y)
-	// result: (VPMINSQ256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINSQ256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinInt64x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinInt64x8 x y)
-	// result: (VPMINSQ512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINSQ512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinInt8x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinInt8x16 x y)
-	// result: (VPMINSB128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINSB128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinInt8x32(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinInt8x32 x y)
-	// result: (VPMINSB256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINSB256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinInt8x64(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinInt8x64 x y)
-	// result: (VPMINSB512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINSB512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinUint16x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinUint16x16 x y)
-	// result: (VPMINUW256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINUW256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinUint16x32(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinUint16x32 x y)
-	// result: (VPMINUW512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINUW512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinUint16x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinUint16x8 x y)
-	// result: (VPMINUW128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINUW128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinUint32x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinUint32x16 x y)
-	// result: (VPMINUD512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINUD512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinUint32x4(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinUint32x4 x y)
-	// result: (VPMINUD128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINUD128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinUint32x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinUint32x8 x y)
-	// result: (VPMINUD256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINUD256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinUint64x2(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinUint64x2 x y)
-	// result: (VPMINUQ128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINUQ128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinUint64x4(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinUint64x4 x y)
-	// result: (VPMINUQ256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINUQ256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinUint64x8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinUint64x8 x y)
-	// result: (VPMINUQ512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINUQ512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinUint8x16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinUint8x16 x y)
-	// result: (VPMINUB128 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINUB128)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinUint8x32(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinUint8x32 x y)
-	// result: (VPMINUB256 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINUB256)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMinUint8x64(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	// match: (MinUint8x64 x y)
-	// result: (VPMINUB512 y x)
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpAMD64VPMINUB512)
-		v.AddArg2(y, x)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMod16(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (Mod16 [a] x y)
-	// result: (Select1 (DIVW [a] x y))
-	for {
-		a := auxIntToBool(v.AuxInt)
-		x := v_0
-		y := v_1
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
-		v0.AuxInt = boolToAuxInt(a)
-		v0.AddArg2(x, y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMod16u(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (Mod16u x y)
-	// result: (Select1 (DIVWU x y))
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
-		v0.AddArg2(x, y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMod32(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (Mod32 [a] x y)
-	// result: (Select1 (DIVL [a] x y))
-	for {
-		a := auxIntToBool(v.AuxInt)
-		x := v_0
-		y := v_1
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
-		v0.AuxInt = boolToAuxInt(a)
-		v0.AddArg2(x, y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMod32u(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (Mod32u x y)
-	// result: (Select1 (DIVLU x y))
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
-		v0.AddArg2(x, y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMod64(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (Mod64 [a] x y)
-	// result: (Select1 (DIVQ [a] x y))
-	for {
-		a := auxIntToBool(v.AuxInt)
-		x := v_0
-		y := v_1
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
-		v0.AuxInt = boolToAuxInt(a)
-		v0.AddArg2(x, y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMod64u(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (Mod64u x y)
-	// result: (Select1 (DIVQU x y))
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
-		v0.AddArg2(x, y)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMod8(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (Mod8 x y)
-	// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
-		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
-		v1.AddArg(x)
-		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
-		v2.AddArg(y)
-		v0.AddArg2(v1, v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMod8u(v *Value) bool {
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (Mod8u x y)
-	// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
-	for {
-		x := v_0
-		y := v_1
-		v.reset(OpSelect1)
-		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
-		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
-		v1.AddArg(x)
-		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
-		v2.AddArg(y)
-		v0.AddArg2(v1, v2)
-		v.AddArg(v0)
-		return true
-	}
-}
-func rewriteValueAMD64_OpMove(v *Value) bool {
-	v_2 := v.Args[2]
-	v_1 := v.Args[1]
-	v_0 := v.Args[0]
-	b := v.Block
-	typ := &b.Func.Config.Types
-	// match: (Move [0] _ _ mem)
-	// result: mem
-	for {
if auxIntToInt64(v.AuxInt) != 0 { - break - } - mem := v_2 - v.copyOf(mem) - return true - } - // match: (Move [1] dst src mem) - // result: (MOVBstore dst (MOVBload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 1 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [2] dst src mem) - // result: (MOVWstore dst (MOVWload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 2 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [4] dst src mem) - // result: (MOVLstore dst (MOVLload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 4 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [8] dst src mem) - // result: (MOVQstore dst (MOVQload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 8 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVQstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [16] dst src mem) - // result: (MOVOstore dst (MOVOload src mem) mem) - for { - if auxIntToInt64(v.AuxInt) != 16 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVOstore) - v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v0.AddArg2(src, mem) - v.AddArg3(dst, v0, mem) - return true - } - // match: (Move [32] dst src mem) - // result: (Move [16] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) - for { - if auxIntToInt64(v.AuxInt) != 32 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(16) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) - return true - } - // match: (Move [48] dst src mem) - // result: (Move [32] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) - for { - if auxIntToInt64(v.AuxInt) != 48 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(32) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(16) - v2.AddArg3(dst, src, mem) - v.AddArg3(v0, v1, v2) - return true - } - // match: (Move [64] dst src mem) - // result: (Move [32] (OffPtr dst [32]) (OffPtr src [32]) (Move [32] dst src mem)) - for { - if auxIntToInt64(v.AuxInt) != 64 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(32) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(32) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(32) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) - v2.AuxInt = int64ToAuxInt(32) - v2.AddArg3(dst, src, mem) - 
v.AddArg3(v0, v1, v2) - return true - } - // match: (Move [3] dst src mem) - // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 3 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(2) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(2) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [5] dst src mem) - // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 5 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(4) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [6] dst src mem) - // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 6 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v.AuxInt = int32ToAuxInt(4) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AuxInt = int32ToAuxInt(4) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [7] dst src mem) - // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 7 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(3) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(3) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [9] dst src mem) - // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 9 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVBstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [10] dst src mem) - // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 10 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVWstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, 
typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [11] dst src mem) - // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 11 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(7) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(7) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [12] dst src mem) - // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - if auxIntToInt64(v.AuxInt) != 12 { - break - } - dst := v_0 - src := v_1 - mem := v_2 - v.reset(OpAMD64MOVLstore) - v.AuxInt = int32ToAuxInt(8) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) - v0.AuxInt = int32ToAuxInt(8) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [s] dst src mem) - // cond: s >= 13 && s <= 15 - // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem)) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s >= 13 && s <= 15) { - break - } - v.reset(OpAMD64MOVQstore) - v.AuxInt = int32ToAuxInt(int32(s - 8)) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v0.AuxInt = int32ToAuxInt(int32(s - 8)) - v0.AddArg2(src, mem) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v2.AddArg2(src, mem) - v1.AddArg3(dst, v2, mem) - v.AddArg3(dst, v0, v1) - return true - } - // match: (Move [s] dst src mem) - // cond: s > 16 && s%16 != 0 && s%16 <= 8 - // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16 && s%16 != 0 && s%16 <= 8) { - break - } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(s - s%16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(s % 16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(s % 16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) - v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) - v3.AddArg2(src, mem) - v2.AddArg3(dst, v3, mem) - v.AddArg3(v0, v1, v2) - return true - } - // match: (Move [s] dst src mem) - // cond: s > 16 && s%16 != 0 && s%16 > 8 - // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16 && s%16 != 0 && s%16 > 8) { - break - } - v.reset(OpMove) - v.AuxInt = int64ToAuxInt(s - s%16) - v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) - v0.AuxInt = int64ToAuxInt(s % 16) - v0.AddArg(dst) - v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) - v1.AuxInt = int64ToAuxInt(s % 16) - v1.AddArg(src) - v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) - v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) - v3.AddArg2(src, mem) - v2.AddArg3(dst, v3, mem) - v.AddArg3(v0, 
v1, v2) - return true - } - // match: (Move [s] dst src mem) - // cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s) - // result: (DUFFCOPY [s] dst src mem) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) { - break - } - v.reset(OpAMD64DUFFCOPY) - v.AuxInt = int64ToAuxInt(s) - v.AddArg3(dst, src, mem) - return true - } - // match: (Move [s] dst src mem) - // cond: s > 16*64 && s%8 == 0 && logLargeCopy(v, s) - // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) - for { - s := auxIntToInt64(v.AuxInt) - dst := v_0 - src := v_1 - mem := v_2 - if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) { - break - } - v.reset(OpAMD64REPMOVSQ) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(s / 8) - v.AddArg4(dst, src, v0, mem) + v.AddArg3(x, y, v0) return true } - return false } -func rewriteValueAMD64_OpMulByPowOf2Float32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulByPowOf2Float32x16 x y) - // result: (VSCALEFPS512 y x) + b := v.Block + // match: (MaskedSaturatedSubUint16x16 x y mask) + // result: (VPSUBSWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VSCALEFPS512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulByPowOf2Float32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulByPowOf2Float32x4 x y) - // result: (VSCALEFPS128 y x) + b := v.Block + // match: (MaskedSaturatedSubUint16x32 x y mask) + // result: (VPSUBSWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VSCALEFPS128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulByPowOf2Float32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulByPowOf2Float32x8 x y) - // result: (VSCALEFPS256 y x) + b := v.Block + // match: (MaskedSaturatedSubUint16x8 x y mask) + // result: (VPSUBSWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VSCALEFPS256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBSWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulByPowOf2Float64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulByPowOf2Float64x2 x y) - // result: (VSCALEFPD128 y x) + b := v.Block + // match: (MaskedSaturatedSubUint8x16 x y mask) + // result: (VPSUBSBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VSCALEFPD128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulByPowOf2Float64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x32(v 
*Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulByPowOf2Float64x4 x y) - // result: (VSCALEFPD256 y x) + b := v.Block + // match: (MaskedSaturatedSubUint8x32 x y mask) + // result: (VPSUBSBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VSCALEFPD256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulByPowOf2Float64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSaturatedSubUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulByPowOf2Float64x8 x y) - // result: (VSCALEFPD512 y x) + b := v.Block + // match: (MaskedSaturatedSubUint8x64 x y mask) + // result: (VPSUBSBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VSCALEFPD512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBSBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSqrtFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenInt32x4 x y) - // result: (VPMULDQ128 y x) + b := v.Block + // match: (MaskedSqrtFloat32x16 x mask) + // result: (VSQRTPSMasked512 x (VPMOVVec32x16ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMULDQ128) - v.AddArg2(y, x) + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSqrtFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenInt32x8 x y) - // result: (VPMULDQ256 y x) + b := v.Block + // match: (MaskedSqrtFloat32x4 x mask) + // result: (VSQRTPSMasked128 x (VPMOVVec32x4ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMULDQ256) - v.AddArg2(y, x) + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedSqrtFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenInt64x2 x y) - // result: (VPMULDQ128 y x) + b := v.Block + // match: (MaskedSqrtFloat32x8 x mask) + // result: (VSQRTPSMasked256 x (VPMOVVec32x8ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMULDQ128) - v.AddArg2(y, x) + mask := v_1 + v.reset(OpAMD64VSQRTPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSqrtFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenInt64x4 x y) - // result: (VPMULDQ256 y x) + b := v.Block + // match: (MaskedSqrtFloat64x2 x mask) + // result: (VSQRTPDMasked128 x (VPMOVVec64x2ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMULDQ256) - v.AddArg2(y, x) + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenInt64x8(v *Value) bool { +func 
rewriteValueAMD64_OpMaskedSqrtFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenInt64x8 x y) - // result: (VPMULDQ512 y x) + b := v.Block + // match: (MaskedSqrtFloat64x4 x mask) + // result: (VSQRTPDMasked256 x (VPMOVVec64x4ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMULDQ512) - v.AddArg2(y, x) + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSqrtFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenUint32x4 x y) - // result: (VPMULUDQ128 y x) + b := v.Block + // match: (MaskedSqrtFloat64x8 x mask) + // result: (VSQRTPDMasked512 x (VPMOVVec64x8ToM mask)) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPMULUDQ128) - v.AddArg2(y, x) + mask := v_1 + v.reset(OpAMD64VSQRTPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenUint32x8 x y) - // result: (VPMULUDQ256 y x) + b := v.Block + // match: (MaskedSubFloat32x16 x y mask) + // result: (VADDPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULUDQ256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VADDPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenUint64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenUint64x2 x y) - // result: (VPMULUDQ128 y x) + b := v.Block + // match: (MaskedSubFloat32x4 x y mask) + // result: (VADDPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULUDQ128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VADDPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenUint64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenUint64x4 x y) - // result: (VPMULUDQ256 y x) + b := v.Block + // match: (MaskedSubFloat32x8 x y mask) + // result: (VADDPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULUDQ256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VADDPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulEvenWidenUint64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubFloat64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulEvenWidenUint64x8 x y) - // result: (VPMULUDQ512 y x) + b := v.Block + // match: (MaskedSubFloat64x2 x y mask) + // result: (VADDPDMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULUDQ512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VADDPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpMulFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulFloat32x16 x y) - // result: (VMULPS512 y x) + b := v.Block + // match: (MaskedSubFloat64x4 x y mask) + // result: (VADDPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VMULPS512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VADDPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulFloat32x4 x y) - // result: (VMULPS128 y x) + b := v.Block + // match: (MaskedSubFloat64x8 x y mask) + // result: (VADDPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VMULPS128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VADDPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulFloat32x8 x y) - // result: (VMULPS256 y x) + b := v.Block + // match: (MaskedSubInt16x16 x y mask) + // result: (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VMULPS256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulFloat64x2 x y) - // result: (VMULPD128 y x) + b := v.Block + // match: (MaskedSubInt16x32 x y mask) + // result: (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VMULPD128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulFloat64x4 x y) - // result: (VMULPD256 y x) + b := v.Block + // match: (MaskedSubInt16x8 x y mask) + // result: (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VMULPD256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulFloat64x8 x y) - // result: (VMULPD512 y x) + b := v.Block + // match: (MaskedSubInt32x16 x y mask) + // result: (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VMULPD512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpMulHighInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulHighInt16x16 x y) - // result: (VPMULHW256 y x) + b := v.Block + // match: (MaskedSubInt32x4 x y mask) + // result: (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULHW256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulHighInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulHighInt16x32 x y) - // result: (VPMULHW512 y x) + b := v.Block + // match: (MaskedSubInt32x8 x y mask) + // result: (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULHW512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulHighInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulHighInt16x8 x y) - // result: (VPMULHW128 y x) + b := v.Block + // match: (MaskedSubInt64x2 x y mask) + // result: (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULHW128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulHighUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulHighUint16x16 x y) - // result: (VPMULHUW256 y x) + b := v.Block + // match: (MaskedSubInt64x4 x y mask) + // result: (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULHUW256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulHighUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulHighUint16x32 x y) - // result: (VPMULHUW512 y x) + b := v.Block + // match: (MaskedSubInt64x8 x y mask) + // result: (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULHUW512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulHighUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulHighUint16x8 x y) - // result: (VPMULHUW128 y x) + b := v.Block + // match: (MaskedSubInt8x16 x y mask) + // result: (VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULHUW128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpMulLowInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt16x16 x y) - // result: (VPMULLW256 y x) + b := v.Block + // match: (MaskedSubInt8x32 x y mask) + // result: (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLW256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubInt8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt16x32 x y) - // result: (VPMULLW512 y x) + b := v.Block + // match: (MaskedSubInt8x64 x y mask) + // result: (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLW512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint16x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt16x8 x y) - // result: (VPMULLW128 y x) + b := v.Block + // match: (MaskedSubUint16x16 x y mask) + // result: (VPSUBWMasked256 x y (VPMOVVec16x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLW128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint16x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt32x16 x y) - // result: (VPMULLD512 y x) + b := v.Block + // match: (MaskedSubUint16x32 x y mask) + // result: (VPSUBWMasked512 x y (VPMOVVec16x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLD512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint16x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt32x4 x y) - // result: (VPMULLD128 y x) + b := v.Block + // match: (MaskedSubUint16x8 x y mask) + // result: (VPSUBWMasked128 x y (VPMOVVec16x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLD128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBWMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec16x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt32x8 x y) - // result: (VPMULLD256 y x) + b := v.Block + // match: (MaskedSubUint32x16 x y mask) + // result: (VPSUBDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLD256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func 
rewriteValueAMD64_OpMulLowInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt64x2 x y) - // result: (VPMULLQ128 y x) + b := v.Block + // match: (MaskedSubUint32x4 x y mask) + // result: (VPSUBDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLQ128) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt64x4 x y) - // result: (VPMULLQ256 y x) + b := v.Block + // match: (MaskedSubUint32x8 x y mask) + // result: (VPSUBDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLQ256) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpMulLowInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (MulLowInt64x8 x y) - // result: (VPMULLQ512 y x) + b := v.Block + // match: (MaskedSubUint64x2 x y mask) + // result: (VPSUBQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMULLQ512) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPSUBQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeg32F(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint64x4(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Neg32F x) - // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) + // match: (MaskedSubUint64x4 x y mask) + // result: (VPSUBQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 - v.reset(OpAMD64PXOR) - v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) - v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeg64F(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint64x8(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (Neg64F x) - // result: (PXOR x (MOVSDconst [math.Copysign(0, -1)])) + // match: (MaskedSubUint64x8 x y mask) + // result: (VPSUBQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 - v.reset(OpAMD64PXOR) - v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) - v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) - v.AddArg2(x, v0) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPSUBQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeq16(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint8x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Neq16 x y) - // result: (SETNE (CMPW x y)) + // match: (MaskedSubUint8x16 x y mask) + // result: 
(VPSUBBMasked128 x y (VPMOVVec8x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeq32(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint8x32(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Neq32 x y) - // result: (SETNE (CMPL x y)) + // match: (MaskedSubUint8x32 x y mask) + // result: (VPSUBBMasked256 x y (VPMOVVec8x32ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x32ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeq32F(v *Value) bool { +func rewriteValueAMD64_OpMaskedSubUint8x64(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Neq32F x y) - // result: (SETNEF (UCOMISS x y)) + // match: (MaskedSubUint8x64 x y mask) + // result: (VPSUBBMasked512 x y (VPMOVVec8x64ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPSUBBMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec8x64ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeq64(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorFloat32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Neq64 x y) - // result: (SETNE (CMPQ x y)) + // match: (MaskedXorFloat32x16 x y mask) + // result: (VXORPSMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VXORPSMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeq64F(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorFloat32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Neq64F x y) - // result: (SETNEF (UCOMISD x y)) + // match: (MaskedXorFloat32x4 x y mask) + // result: (VXORPSMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNEF) - v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VXORPSMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeq8(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorFloat32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Neq8 x y) - // result: (SETNE (CMPB x y)) + // match: (MaskedXorFloat32x8 x y mask) + // result: (VXORPSMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VXORPSMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + 
v.AddArg3(x, y, v0) + return true + } +} +func rewriteValueAMD64_OpMaskedXorFloat64x2(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (MaskedXorFloat64x2 x y mask) + // result: (VXORPDMasked128 x y (VPMOVVec64x2ToM mask)) + for { + x := v_0 + y := v_1 + mask := v_2 + v.reset(OpAMD64VXORPDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeqB(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorFloat64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (NeqB x y) - // result: (SETNE (CMPB x y)) + // match: (MaskedXorFloat64x4 x y mask) + // result: (VXORPDMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VXORPDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNeqPtr(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorFloat64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (NeqPtr x y) - // result: (SETNE (CMPQ x y)) + // match: (MaskedXorFloat64x8 x y mask) + // result: (VXORPDMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64SETNE) - v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v0.AddArg2(x, y) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VXORPDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNot(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorInt32x16(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Not x) - // result: (XORLconst [1] x) + b := v.Block + // match: (MaskedXorInt32x16 x y mask) + // result: (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 - v.reset(OpAMD64XORLconst) - v.AuxInt = int32ToAuxInt(1) - v.AddArg(x) + y := v_1 + mask := v_2 + v.reset(OpAMD64VPXORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorInt32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualFloat32x16 x y) - // result: (VPMOVMToVec32x16 (VCMPPS512 [4] y x)) + // match: (MaskedXorInt32x4 x y mask) + // result: (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorInt32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat32x4 x y) - // result: (VCMPPS128 [4] y x) + b := v.Block + // match: (MaskedXorInt32x8 x y mask) + // result: (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS128) - v.AuxInt = int8ToAuxInt(4) - 
v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPXORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorInt64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat32x8 x y) - // result: (VCMPPS256 [4] y x) + b := v.Block + // match: (MaskedXorInt64x2 x y mask) + // result: (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPS256) - v.AuxInt = int8ToAuxInt(4) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPXORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorInt64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat64x2 x y) - // result: (VCMPPD128 [4] y x) + b := v.Block + // match: (MaskedXorInt64x4 x y mask) + // result: (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD128) - v.AuxInt = int8ToAuxInt(4) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPXORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorInt64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (NotEqualFloat64x4 x y) - // result: (VCMPPD256 [4] y x) + b := v.Block + // match: (MaskedXorInt64x8 x y mask) + // result: (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VCMPPD256) - v.AuxInt = int8ToAuxInt(4) - v.AddArg2(y, x) + mask := v_2 + v.reset(OpAMD64VPXORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualFloat64x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorUint32x16(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualFloat64x8 x y) - // result: (VPMOVMToVec64x8 (VCMPPD512 [4] y x)) + // match: (MaskedXorUint32x16 x y mask) + // result: (VPXORDMasked512 x y (VPMOVVec32x16ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORDMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x16ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt16x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorUint32x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPW256 [4] y x)) + // match: (MaskedXorUint32x4 x y mask) + // result: (VPXORDMasked128 x y (VPMOVVec32x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORDMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, 
v0) return true } } -func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorUint32x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPW512 [4] y x)) + // match: (MaskedXorUint32x8 x y mask) + // result: (VPXORDMasked256 x y (VPMOVVec32x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORDMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec32x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt16x8(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorUint64x2(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPW128 [4] y x)) + // match: (MaskedXorUint64x2 x y mask) + // result: (VPXORQMasked128 x y (VPMOVVec64x2ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORQMasked128) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x2ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorUint64x4(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPD512 [4] y x)) + // match: (MaskedXorUint64x4 x y mask) + // result: (VPXORQMasked256 x y (VPMOVVec64x4ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORQMasked256) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x4ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt32x4(v *Value) bool { +func rewriteValueAMD64_OpMaskedXorUint64x8(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPD128 [4] y x)) + // match: (MaskedXorUint64x8 x y mask) + // result: (VPXORQMasked512 x y (VPMOVVec64x8ToM mask)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + mask := v_2 + v.reset(OpAMD64VPXORQMasked512) + v0 := b.NewValue0(v.Pos, OpAMD64VPMOVVec64x8ToM, types.TypeMask) + v0.AddArg(mask) + v.AddArg3(x, y, v0) return true } } -func rewriteValueAMD64_OpNotEqualInt32x8(v *Value) bool { +func rewriteValueAMD64_OpMax32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPD256 [4] y x)) + // match: (Max32F x y) + // result: (Neg32F (Min32F (Neg32F x) (Neg32F y))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + 
v.reset(OpNeg32F) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMin32F, t) + v1 := b.NewValue0(v.Pos, OpNeg32F, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpNeg32F, t) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualInt64x2(v *Value) bool { +func rewriteValueAMD64_OpMax64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPQ128 [4] y x)) + // match: (Max64F x y) + // result: (Neg64F (Min64F (Neg64F x) (Neg64F y))) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpNeg64F) + v.Type = t + v0 := b.NewValue0(v.Pos, OpMin64F, t) + v1 := b.NewValue0(v.Pos, OpNeg64F, t) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpNeg64F, t) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualInt64x4(v *Value) bool { +func rewriteValueAMD64_OpMin32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPQ256 [4] y x)) + // match: (Min32F x y) + // result: (POR (MINSS (MINSS x y) x) (MINSS x y)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + v.reset(OpAMD64POR) + v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t) + v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t) + v1.AddArg2(x, y) + v0.AddArg2(v1, x) + v.AddArg2(v0, v1) return true } } -func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { +func rewriteValueAMD64_OpMin64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualInt64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPQ512 [4] y x)) + // match: (Min64F x y) + // result: (POR (MINSD (MINSD x y) x) (MINSD x y)) for { + t := v.Type x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + v.reset(OpAMD64POR) + v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t) + v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t) + v1.AddArg2(x, y) + v0.AddArg2(v1, x) + v.AddArg2(v0, v1) return true } } -func rewriteValueAMD64_OpNotEqualInt8x16(v *Value) bool { +func rewriteValueAMD64_OpMod16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualInt8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPB128 [4] y x)) + // match: (Mod16 [a] x y) + // result: (Select1 (DIVW [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualInt8x32(v *Value) bool { +func rewriteValueAMD64_OpMod16u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualInt8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPB256 [4] y x)) + // match: (Mod16u x y) + // result: (Select1 (DIVWU x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := 
b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualInt8x64(v *Value) bool { +func rewriteValueAMD64_OpMod32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualInt8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPB512 [4] y x)) + // match: (Mod32 [a] x y) + // result: (Select1 (DIVL [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualUint16x16(v *Value) bool { +func rewriteValueAMD64_OpMod32u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualUint16x16 x y) - // result: (VPMOVMToVec16x16 (VPCMPUW256 [4] y x)) + // match: (Mod32u x y) + // result: (Select1 (DIVLU x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32)) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { +func rewriteValueAMD64_OpMod64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualUint16x32 x y) - // result: (VPMOVMToVec16x32 (VPCMPUW512 [4] y x)) + // match: (Mod64 [a] x y) + // result: (Select1 (DIVQ [a] x y)) for { + a := auxIntToBool(v.AuxInt) x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64)) + v0.AuxInt = boolToAuxInt(a) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualUint16x8(v *Value) bool { +func rewriteValueAMD64_OpMod64u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualUint16x8 x y) - // result: (VPMOVMToVec16x8 (VPCMPUW128 [4] y x)) + // match: (Mod64u x y) + // result: (Select1 (DIVQU x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec16x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { +func rewriteValueAMD64_OpMod8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualUint32x16 x y) - // result: (VPMOVMToVec32x16 (VPCMPUD512 [4] y x)) + // match: (Mod8 x y) + // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVW, 
types.NewTuple(typ.Int16, typ.Int16)) + v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualUint32x4(v *Value) bool { +func rewriteValueAMD64_OpMod8u(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualUint32x4 x y) - // result: (VPMOVMToVec32x4 (VPCMPUD128 [4] y x)) + // match: (Mod8u x y) + // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) for { x := v_0 y := v_1 - v.reset(OpAMD64VPMOVMToVec32x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16) + v2.AddArg(y) + v0.AddArg2(v1, v2) v.AddArg(v0) return true } } -func rewriteValueAMD64_OpNotEqualUint32x8(v *Value) bool { +func rewriteValueAMD64_OpMove(v *Value) bool { + v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (NotEqualUint32x8 x y) - // result: (VPMOVMToVec32x8 (VPCMPUD256 [4] y x)) + // match: (Move [0] _ _ mem) + // result: mem for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec32x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] dst src mem) + // result: (MOVWstore dst (MOVWload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [4] dst src mem) + // result: (MOVLstore dst (MOVLload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [8] dst src mem) + // result: (MOVQstore dst (MOVQload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVQstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [16] dst src mem) + // result: (MOVOstore dst (MOVOload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVOstore) + v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [32] dst src mem) + // result: (Move [16] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) + for { + if auxIntToInt64(v.AuxInt) != 32 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + 
v.reset(OpMove) + v.AuxInt = int64ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(16) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpNotEqualUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x2 x y) - // result: (VPMOVMToVec64x2 (VPCMPUQ128 [4] y x)) + // match: (Move [48] dst src mem) + // result: (Move [32] (OffPtr dst [16]) (OffPtr src [16]) (Move [16] dst src mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x2) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 48 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(16) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpNotEqualUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x4 x y) - // result: (VPMOVMToVec64x4 (VPCMPUQ256 [4] y x)) + // match: (Move [64] dst src mem) + // result: (Move [32] (OffPtr dst [32]) (OffPtr src [32]) (Move [32] dst src mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x4) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 64 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(32) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(32) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem) + v2.AuxInt = int64ToAuxInt(32) + v2.AddArg3(dst, src, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint64x8 x y) - // result: (VPMOVMToVec64x8 (VPCMPUQ512 [4] y x)) + // match: (Move [3] dst src mem) + // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec64x8) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 3 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpNotEqualUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := 
&b.Func.Config.Types - // match: (NotEqualUint8x16 x y) - // result: (VPMOVMToVec8x16 (VPCMPUB128 [4] y x)) + // match: (Move [5] dst src mem) + // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x16) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 5 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpNotEqualUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint8x32 x y) - // result: (VPMOVMToVec8x32 (VPCMPUB256 [4] y x)) + // match: (Move [6] dst src mem) + // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x32) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 6 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpNotEqualUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (NotEqualUint8x64 x y) - // result: (VPMOVMToVec8x64 (VPCMPUB512 [4] y x)) + // match: (Move [7] dst src mem) + // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPMOVMToVec8x64) - v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) - v0.AuxInt = int8ToAuxInt(4) - v0.AddArg2(y, x) - v.AddArg(v0) + if auxIntToInt64(v.AuxInt) != 7 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpOffPtr(v *Value) bool { - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (OffPtr [off] ptr) - // cond: is32Bit(off) - // result: (ADDQconst [int32(off)] ptr) + // match: (Move [9] dst src mem) + // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - off := auxIntToInt64(v.AuxInt) - ptr := v_0 - if !(is32Bit(off)) { + if auxIntToInt64(v.AuxInt) != 9 { break } - v.reset(OpAMD64ADDQconst) - v.AuxInt = int32ToAuxInt(int32(off)) - v.AddArg(ptr) + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVBstore) + v.AuxInt = int32ToAuxInt(8) + v0 := 
b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } - // match: (OffPtr [off] ptr) - // result: (ADDQ (MOVQconst [off]) ptr) + // match: (Move [10] dst src mem) + // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - off := auxIntToInt64(v.AuxInt) - ptr := v_0 - v.reset(OpAMD64ADDQ) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(off) - v.AddArg2(v0, ptr) + if auxIntToInt64(v.AuxInt) != 10 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpOrFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrFloat32x16 x y) - // result: (VORPS512 y x) + // match: (Move [11] dst src mem) + // result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VORPS512) - v.AddArg2(y, x) + if auxIntToInt64(v.AuxInt) != 11 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(7) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(7) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpOrFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrFloat32x4 x y) - // result: (VORPS128 y x) + // match: (Move [12] dst src mem) + // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VORPS128) - v.AddArg2(y, x) + if auxIntToInt64(v.AuxInt) != 12 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpAMD64MOVLstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpOrFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrFloat32x8 x y) - // result: (VORPS256 y x) + // match: (Move [s] dst src mem) + // cond: s >= 13 && s <= 15 + // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VORPS256) - v.AddArg2(y, x) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s >= 13 && s <= 15) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = int32ToAuxInt(int32(s - 8)) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(int32(s - 8)) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, 
types.TypeMem) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) return true } -} -func rewriteValueAMD64_OpOrFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrFloat64x2 x y) - // result: (VORPD128 y x) + // match: (Move [s] dst src mem) + // cond: s > 16 && s%16 != 0 && s%16 <= 8 + // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VORPD128) - v.AddArg2(y, x) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16 && s%16 != 0 && s%16 <= 8) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s % 16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpOrFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrFloat64x4 x y) - // result: (VORPD256 y x) + // match: (Move [s] dst src mem) + // cond: s > 16 && s%16 != 0 && s%16 > 8 + // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VORPD256) - v.AddArg2(y, x) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16 && s%16 != 0 && s%16 > 8) { + break + } + v.reset(OpMove) + v.AuxInt = int64ToAuxInt(s - s%16) + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = int64ToAuxInt(s % 16) + v0.AddArg(dst) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = int64ToAuxInt(s % 16) + v1.AddArg(src) + v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) + v3.AddArg2(src, mem) + v2.AddArg3(dst, v3, mem) + v.AddArg3(v0, v1, v2) return true } -} -func rewriteValueAMD64_OpOrFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrFloat64x8 x y) - // result: (VORPD512 y x) + // match: (Move [s] dst src mem) + // cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s) + // result: (DUFFCOPY [s] dst src mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VORPD512) - v.AddArg2(y, x) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) { + break + } + v.reset(OpAMD64DUFFCOPY) + v.AuxInt = int64ToAuxInt(s) + v.AddArg3(dst, src, mem) return true } -} -func rewriteValueAMD64_OpOrInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (OrInt16x16 x y) - // result: (VPOR256 y x) + // match: (Move [s] dst src mem) + // cond: s > 16*64 && s%8 == 0 && logLargeCopy(v, s) + // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + s := auxIntToInt64(v.AuxInt) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) { + break + } + v.reset(OpAMD64REPMOVSQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(s / 8) + v.AddArg4(dst, src, v0, mem) return true } + return false } -func rewriteValueAMD64_OpOrInt16x8(v *Value) bool { - v_1 := v.Args[1] +func 
rewriteValueAMD64_OpNeg32F(v *Value) bool { v_0 := v.Args[0] - // match: (OrInt16x8 x y) - // result: (VPOR128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg32F x) + // result: (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64PXOR) + v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32) + v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1))) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpOrInt32x16(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpNeg64F(v *Value) bool { v_0 := v.Args[0] - // match: (OrInt32x16 x y) - // result: (VPORD512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg64F x) + // result: (PXOR x (MOVSDconst [math.Copysign(0, -1)])) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPORD512) - v.AddArg2(y, x) + v.reset(OpAMD64PXOR) + v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64) + v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1)) + v.AddArg2(x, v0) return true } } -func rewriteValueAMD64_OpOrInt32x4(v *Value) bool { +func rewriteValueAMD64_OpNeq16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt32x4 x y) - // result: (VPOR128 y x) + b := v.Block + // match: (Neq16 x y) + // result: (SETNE (CMPW x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrInt32x8(v *Value) bool { +func rewriteValueAMD64_OpNeq32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt32x8 x y) - // result: (VPOR256 y x) + b := v.Block + // match: (Neq32 x y) + // result: (SETNE (CMPL x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrInt64x2(v *Value) bool { +func rewriteValueAMD64_OpNeq32F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt64x2 x y) - // result: (VPOR128 y x) + b := v.Block + // match: (Neq32F x y) + // result: (SETNEF (UCOMISS x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64SETNEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrInt64x4(v *Value) bool { +func rewriteValueAMD64_OpNeq64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt64x4 x y) - // result: (VPOR256 y x) + b := v.Block + // match: (Neq64 x y) + // result: (SETNE (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrInt64x8(v *Value) bool { +func rewriteValueAMD64_OpNeq64F(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt64x8 x y) - // result: (VPORQ512 y x) + b := v.Block + // match: (Neq64F x y) + // result: (SETNEF (UCOMISD x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPORQ512) - v.AddArg2(y, x) + v.reset(OpAMD64SETNEF) + v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrInt8x16(v *Value) bool { +func rewriteValueAMD64_OpNeq8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt8x16 x y) - 
// result: (VPOR128 y x) + b := v.Block + // match: (Neq8 x y) + // result: (SETNE (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrInt8x32(v *Value) bool { +func rewriteValueAMD64_OpNeqB(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrInt8x32 x y) - // result: (VPOR256 y x) + b := v.Block + // match: (NeqB x y) + // result: (SETNE (CMPB x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrUint16x16(v *Value) bool { +func rewriteValueAMD64_OpNeqPtr(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint16x16 x y) - // result: (VPOR256 y x) + b := v.Block + // match: (NeqPtr x y) + // result: (SETNE (CMPQ x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrUint16x8(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpNot(v *Value) bool { v_0 := v.Args[0] - // match: (OrUint16x8 x y) - // result: (VPOR128 y x) + // match: (Not x) + // result: (XORLconst [1] x) for { x := v_0 - y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64XORLconst) + v.AuxInt = int32ToAuxInt(1) + v.AddArg(x) return true } } -func rewriteValueAMD64_OpOrUint32x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint32x16 x y) - // result: (VPORD512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualFloat32x16 x y) + // result: (VPMOVMToVec32x16 (VCMPPS512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPORD512) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPS512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrUint32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint32x4 x y) - // result: (VPOR128 y x) + // match: (NotEqualFloat32x4 x y) + // result: (VCMPPS128 [4] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64VCMPPS128) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpOrUint32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint32x8 x y) - // result: (VPOR256 y x) + // match: (NotEqualFloat32x8 x y) + // result: (VCMPPS256 [4] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64VCMPPS256) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpOrUint64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint64x2 x y) - // result: (VPOR128 y x) + // match: (NotEqualFloat64x2 x y) + // result: (VCMPPD128 [4] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64VCMPPD128) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpOrUint64x4(v *Value) bool 
{ +func rewriteValueAMD64_OpNotEqualFloat64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint64x4 x y) - // result: (VPOR256 y x) + // match: (NotEqualFloat64x4 x y) + // result: (VCMPPD256 [4] x y) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64VCMPPD256) + v.AuxInt = int8ToAuxInt(4) + v.AddArg2(x, y) return true } } -func rewriteValueAMD64_OpOrUint64x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualFloat64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint64x8 x y) - // result: (VPORQ512 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualFloat64x8 x y) + // result: (VPMOVMToVec64x8 (VCMPPD512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPORQ512) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VCMPPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrUint8x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint8x16 x y) - // result: (VPOR128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPW256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpOrUint8x32(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (OrUint8x32 x y) - // result: (VPOR256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPW512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPOR256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddFloat32x4 x y) - // result: (VHADDPS128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPW128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHADDPS128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddFloat32x8 x y) - // result: (VHADDPS256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPD512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHADDPS256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddFloat64x2 x y) - // result: (VHADDPD128 y x) + b := v.Block + typ := 
&b.Func.Config.Types + // match: (NotEqualInt32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPD128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHADDPD128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddFloat64x4 x y) - // result: (VHADDPD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPD256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHADDPD256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddInt16x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddInt16x16 x y) - // result: (VPHADDW256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPQ128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDW256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddInt16x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddInt16x8 x y) - // result: (VPHADDW128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPQ256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDW128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddInt32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddInt32x4 x y) - // result: (VPHADDD128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPQ512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDD128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddInt32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddInt32x8 x y) - // result: (VPHADDD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPB128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDD256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddUint16x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddUint16x16 x y) - // result: (VPHADDW256 y x) + b := v.Block + 
typ := &b.Func.Config.Types + // match: (NotEqualInt8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPB256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDW256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddUint16x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualInt8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddUint16x8 x y) - // result: (VPHADDW128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualInt8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPB512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDW128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddUint32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint16x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddUint32x4 x y) - // result: (VPHADDD128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint16x16 x y) + // result: (VPMOVMToVec16x16 (VPCMPUW256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDD128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseAddUint32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint16x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseAddUint32x8 x y) - // result: (VPHADDD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint16x32 x y) + // result: (VPMOVMToVec16x32 (VPCMPUW512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHADDD256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubFloat32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint16x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubFloat32x4 x y) - // result: (VHSUBPS128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint16x8 x y) + // result: (VPMOVMToVec16x8 (VPCMPUW128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHSUBPS128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec16x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUW128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubFloat32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubFloat32x8 x y) - // result: (VHSUBPS256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint32x16 x y) + // result: (VPMOVMToVec32x16 (VPCMPUD512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHSUBPS256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubFloat64x2(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubFloat64x2 x y) - 
// result: (VHSUBPD128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint32x4 x y) + // result: (VPMOVMToVec32x4 (VPCMPUD128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHSUBPD128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubFloat64x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint32x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubFloat64x4 x y) - // result: (VHSUBPD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint32x8 x y) + // result: (VPMOVMToVec32x8 (VPCMPUD256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VHSUBPD256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec32x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUD256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubInt16x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint64x2(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubInt16x16 x y) - // result: (VPHSUBW256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint64x2 x y) + // result: (VPMOVMToVec64x2 (VPCMPUQ128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHSUBW256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x2) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubInt16x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint64x4(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubInt16x8 x y) - // result: (VPHSUBW128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint64x4 x y) + // result: (VPMOVMToVec64x4 (VPCMPUQ256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHSUBW128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x4) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubInt32x4(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint64x8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubInt32x4 x y) - // result: (VPHSUBD128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint64x8 x y) + // result: (VPMOVMToVec64x8 (VPCMPUQ512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHSUBD128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec64x8) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUQ512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubInt32x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint8x16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubInt32x8 x y) - // result: (VPHSUBD256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint8x16 x y) + // result: (VPMOVMToVec8x16 (VPCMPUB128 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHSUBD256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x16) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB128, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubUint16x16(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint8x32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // 
match: (PairwiseSubUint16x16 x y) - // result: (VPHSUBW256 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint8x32 x y) + // result: (VPMOVMToVec8x32 (VPCMPUB256 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHSUBW256) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x32) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB256, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubUint16x8(v *Value) bool { +func rewriteValueAMD64_OpNotEqualUint8x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (PairwiseSubUint16x8 x y) - // result: (VPHSUBW128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (NotEqualUint8x64 x y) + // result: (VPMOVMToVec8x64 (VPCMPUB512 [4] x y)) for { x := v_0 y := v_1 - v.reset(OpAMD64VPHSUBW128) - v.AddArg2(y, x) + v.reset(OpAMD64VPMOVMToVec8x64) + v0 := b.NewValue0(v.Pos, OpAMD64VPCMPUB512, typ.Mask) + v0.AuxInt = int8ToAuxInt(4) + v0.AddArg2(x, y) + v.AddArg(v0) return true } } -func rewriteValueAMD64_OpPairwiseSubUint32x4(v *Value) bool { - v_1 := v.Args[1] +func rewriteValueAMD64_OpOffPtr(v *Value) bool { v_0 := v.Args[0] - // match: (PairwiseSubUint32x4 x y) - // result: (VPHSUBD128 y x) + b := v.Block + typ := &b.Func.Config.Types + // match: (OffPtr [off] ptr) + // cond: is32Bit(off) + // result: (ADDQconst [int32(off)] ptr) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPHSUBD128) - v.AddArg2(y, x) + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + if !(is32Bit(off)) { + break + } + v.reset(OpAMD64ADDQconst) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) return true } -} -func rewriteValueAMD64_OpPairwiseSubUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (PairwiseSubUint32x8 x y) - // result: (VPHSUBD256 y x) + // match: (OffPtr [off] ptr) + // result: (ADDQ (MOVQconst [off]) ptr) for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPHSUBD256) - v.AddArg2(y, x) + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(OpAMD64ADDQ) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(off) + v.AddArg2(v0, ptr) return true } } @@ -46426,270 +43218,6 @@ func rewriteValueAMD64_OpPopCount8(v *Value) bool { return true } } -func rewriteValueAMD64_OpPopCountInt16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt16x16 x) - // result: (VPOPCNTW256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTW256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt16x32(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt16x32 x) - // result: (VPOPCNTW512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTW512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt16x8(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt16x8 x) - // result: (VPOPCNTW128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTW128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt32x16 x) - // result: (VPOPCNTD512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTD512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt32x4 x) - // result: (VPOPCNTD128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTD128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt32x8 x) - // result: (VPOPCNTD256 x) - for { - x := v_0 - 
v.reset(OpAMD64VPOPCNTD256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt64x2 x) - // result: (VPOPCNTQ128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTQ128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt64x4 x) - // result: (VPOPCNTQ256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTQ256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt64x8 x) - // result: (VPOPCNTQ512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTQ512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt8x16(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt8x16 x) - // result: (VPOPCNTB128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTB128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt8x32(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt8x32 x) - // result: (VPOPCNTB256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTB256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountInt8x64(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountInt8x64 x) - // result: (VPOPCNTB512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTB512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint16x16(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint16x16 x) - // result: (VPOPCNTW256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTW256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint16x32(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint16x32 x) - // result: (VPOPCNTW512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTW512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint16x8(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint16x8 x) - // result: (VPOPCNTW128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTW128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint32x16 x) - // result: (VPOPCNTD512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTD512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint32x4 x) - // result: (VPOPCNTD128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTD128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint32x8 x) - // result: (VPOPCNTD256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTD256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint64x2 x) - // result: (VPOPCNTQ128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTQ128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint64x4 x) - // result: (VPOPCNTQ256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTQ256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint64x8 x) - // result: (VPOPCNTQ512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTQ512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint8x16(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint8x16 x) - // 
result: (VPOPCNTB128 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTB128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint8x32(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint8x32 x) - // result: (VPOPCNTB256 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTB256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpPopCountUint8x64(v *Value) bool { - v_0 := v.Args[0] - // match: (PopCountUint8x64 x) - // result: (VPOPCNTB512 x) - for { - x := v_0 - v.reset(OpAMD64VPOPCNTB512) - v.AddArg(x) - return true - } -} func rewriteValueAMD64_OpRoundToEven(v *Value) bool { v_0 := v.Args[0] // match: (RoundToEven x) @@ -48030,370 +44558,6 @@ func rewriteValueAMD64_OpRsh8x8(v *Value) bool { } return false } -func rewriteValueAMD64_OpSaturatedAddInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddInt16x16 x y) - // result: (VPADDSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddInt16x32 x y) - // result: (VPADDSW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddInt16x8 x y) - // result: (VPADDSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddInt8x16 x y) - // result: (VPADDSB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddInt8x32 x y) - // result: (VPADDSB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddInt8x64 x y) - // result: (VPADDSB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddUint16x16 x y) - // result: (VPADDSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddUint16x32 x y) - // result: (VPADDSW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddUint16x8 x y) - // result: (VPADDSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddUint8x16 x y) - // result: (VPADDSB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: 
(SaturatedAddUint8x32 x y) - // result: (VPADDSB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedAddUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedAddUint8x64 x y) - // result: (VPADDSB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPADDSB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedPairwiseAddInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedPairwiseAddInt16x16 x y) - // result: (VPHADDSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPHADDSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedPairwiseAddInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedPairwiseAddInt16x8 x y) - // result: (VPHADDSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPHADDSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedPairwiseSubInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedPairwiseSubInt16x16 x y) - // result: (VPHSUBSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPHSUBSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedPairwiseSubInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedPairwiseSubInt16x8 x y) - // result: (VPHSUBSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPHSUBSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubInt16x16 x y) - // result: (VPSUBSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubInt16x32 x y) - // result: (VPSUBSW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubInt16x8 x y) - // result: (VPSUBSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubInt8x16 x y) - // result: (VPSUBSB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubInt8x32 x y) - // result: (VPSUBSB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubInt8x64 x y) - // result: (VPSUBSB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubUint16x16 x y) - // result: (VPSUBSW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // 
match: (SaturatedSubUint16x32 x y) - // result: (VPSUBSW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubUint16x8 x y) - // result: (VPSUBSW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubUint8x16 x y) - // result: (VPSUBSB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubUint8x32 x y) - // result: (VPSUBSB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSaturatedSubUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SaturatedSubUint8x64 x y) - // result: (VPSUBSB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBSB512) - v.AddArg2(y, x) - return true - } -} func rewriteValueAMD64_OpSelect0(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -48819,84 +44983,6 @@ func rewriteValueAMD64_OpSelectN(v *Value) bool { } return false } -func rewriteValueAMD64_OpSignInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SignInt16x16 x y) - // result: (VPSIGNW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSIGNW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSignInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SignInt16x8 x y) - // result: (VPSIGNW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSIGNW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSignInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SignInt32x4 x y) - // result: (VPSIGND128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSIGND128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSignInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SignInt32x8 x y) - // result: (VPSIGND256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSIGND256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSignInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SignInt8x16 x y) - // result: (VPSIGNB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSIGNB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSignInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SignInt8x32 x y) - // result: (VPSIGNB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSIGNB256) - v.AddArg2(y, x) - return true - } -} func rewriteValueAMD64_OpSlicemask(v *Value) bool { v_0 := v.Args[0] b := v.Block @@ -48941,79 +45027,13 @@ func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool { // result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y)) for { x := v_0 - y := v_1 - v.reset(OpAMD64CMOVQHI) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) - v0.AuxInt = int64ToAuxInt(0) - v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) - v1.AddArg2(x, y) - v.AddArg3(x, v0, v1) - return true - } -} -func rewriteValueAMD64_OpSqrtFloat32x16(v *Value) bool { - v_0 := v.Args[0] - // match: (SqrtFloat32x16 x) - // result: (VSQRTPS512 x) - for { - x := v_0 - 
v.reset(OpAMD64VSQRTPS512) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpSqrtFloat32x4(v *Value) bool { - v_0 := v.Args[0] - // match: (SqrtFloat32x4 x) - // result: (VSQRTPS128 x) - for { - x := v_0 - v.reset(OpAMD64VSQRTPS128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpSqrtFloat32x8(v *Value) bool { - v_0 := v.Args[0] - // match: (SqrtFloat32x8 x) - // result: (VSQRTPS256 x) - for { - x := v_0 - v.reset(OpAMD64VSQRTPS256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpSqrtFloat64x2(v *Value) bool { - v_0 := v.Args[0] - // match: (SqrtFloat64x2 x) - // result: (VSQRTPD128 x) - for { - x := v_0 - v.reset(OpAMD64VSQRTPD128) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpSqrtFloat64x4(v *Value) bool { - v_0 := v.Args[0] - // match: (SqrtFloat64x4 x) - // result: (VSQRTPD256 x) - for { - x := v_0 - v.reset(OpAMD64VSQRTPD256) - v.AddArg(x) - return true - } -} -func rewriteValueAMD64_OpSqrtFloat64x8(v *Value) bool { - v_0 := v.Args[0] - // match: (SqrtFloat64x8 x) - // result: (VSQRTPD512 x) - for { - x := v_0 - v.reset(OpAMD64VSQRTPD512) - v.AddArg(x) + y := v_1 + v.reset(OpAMD64CMOVQHI) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags) + v1.AddArg2(x, y) + v.AddArg3(x, v0, v1) return true } } @@ -49158,396 +45178,6 @@ func rewriteValueAMD64_OpStore(v *Value) bool { } return false } -func rewriteValueAMD64_OpSubFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubFloat32x16 x y) - // result: (VADDPS512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPS512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubFloat32x4 x y) - // result: (VADDPS128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPS128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubFloat32x8 x y) - // result: (VADDPS256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPS256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubFloat64x2 x y) - // result: (VADDPD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubFloat64x4 x y) - // result: (VADDPD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubFloat64x8 x y) - // result: (VADDPD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VADDPD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt16x16 x y) - // result: (VPSUBW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt16x32 x y) - // result: (VPSUBW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] 
- // match: (SubInt16x8 x y) - // result: (VPSUBW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt32x16 x y) - // result: (VPSUBD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt32x4 x y) - // result: (VPSUBD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt32x8 x y) - // result: (VPSUBD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt64x2 x y) - // result: (VPSUBQ128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBQ128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt64x4 x y) - // result: (VPSUBQ256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBQ256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt64x8 x y) - // result: (VPSUBQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt8x16 x y) - // result: (VPSUBB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt8x32 x y) - // result: (VPSUBB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubInt8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubInt8x64 x y) - // result: (VPSUBB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBB512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint16x16 x y) - // result: (VPSUBW256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBW256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint16x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint16x32 x y) - // result: (VPSUBW512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBW512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint16x8 x y) - // result: (VPSUBW128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBW128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint32x16 x y) - // result: (VPSUBD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint32x4 x y) - // result: (VPSUBD128 y x) - for { - x := v_0 - 
y := v_1 - v.reset(OpAMD64VPSUBD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint32x8 x y) - // result: (VPSUBD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint64x2 x y) - // result: (VPSUBQ128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBQ128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint64x4 x y) - // result: (VPSUBQ256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBQ256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint64x8 x y) - // result: (VPSUBQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint8x16 x y) - // result: (VPSUBB128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBB128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint8x32 x y) - // result: (VPSUBB256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBB256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpSubUint8x64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (SubUint8x64 x y) - // result: (VPSUBB512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPSUBB512) - v.AddArg2(y, x) - return true - } -} func rewriteValueAMD64_OpTrunc(v *Value) bool { v_0 := v.Args[0] // match: (Trunc x) @@ -49560,344 +45190,6 @@ func rewriteValueAMD64_OpTrunc(v *Value) bool { return true } } -func rewriteValueAMD64_OpXorFloat32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorFloat32x16 x y) - // result: (VXORPS512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VXORPS512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorFloat32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorFloat32x4 x y) - // result: (VXORPS128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VXORPS128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorFloat32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorFloat32x8 x y) - // result: (VXORPS256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VXORPS256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorFloat64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorFloat64x2 x y) - // result: (VXORPD128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VXORPD128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorFloat64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorFloat64x4 x y) - // result: (VXORPD256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VXORPD256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorFloat64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorFloat64x8 x y) - // result: (VXORPD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VXORPD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt16x16(v *Value) bool 
{ - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt16x16 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt16x8 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt32x16 x y) - // result: (VPXORD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXORD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt32x4 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt32x8 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt64x2 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt64x4 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt64x8 x y) - // result: (VPXORQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXORQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt8x16 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorInt8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorInt8x32 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint16x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint16x16 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint16x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint16x8 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint32x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint32x16 x y) - // result: (VPXORD512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXORD512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint32x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint32x4 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint32x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint32x8 x y) - // result: (VPXOR256 y x) - 
for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint64x2(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint64x2 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint64x4(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint64x4 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint64x8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint64x8 x y) - // result: (VPXORQ512 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXORQ512) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint8x16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint8x16 x y) - // result: (VPXOR128 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR128) - v.AddArg2(y, x) - return true - } -} -func rewriteValueAMD64_OpXorUint8x32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (XorUint8x32 x y) - // result: (VPXOR256 y x) - for { - x := v_0 - y := v_1 - v.reset(OpAMD64VPXOR256) - v.AddArg2(y, x) - return true - } -} func rewriteValueAMD64_OpZero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssagen/simdintrinsics.go b/src/cmd/compile/internal/ssagen/simdintrinsics.go index cf3c1813e4..3c8104ec2c 100644 --- a/src/cmd/compile/internal/ssagen/simdintrinsics.go +++ b/src/cmd/compile/internal/ssagen/simdintrinsics.go @@ -1083,408 +1083,408 @@ func simdIntrinsics(addF func(pkg, fn string, b intrinsicBuilder, archFamilies . 
addF(simdPackage, "Uint8x64.MaskedSaturatedAdd", opLen3(ssa.OpMaskedSaturatedAddUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedSaturatedSub", opLen3(ssa.OpMaskedSaturatedSubUint8x64, types.TypeVec512), sys.AMD64) addF(simdPackage, "Uint8x64.MaskedSub", opLen3(ssa.OpMaskedSubUint8x64, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Float32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint16x16", func(s *state, n *ir.CallExpr, 
args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) 
*ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Float64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, 
sys.AMD64) + addF(simdPackage, "Int16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint64x2", func(s 
*state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int32x8.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return 
args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, 
"Float64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x8.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x16.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsInt8x64", func(s 
*state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, 
sys.AMD64) - addF(simdPackage, "Uint8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, 
"Int64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x64.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { 
return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x16.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint16x32.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - 
addF(simdPackage, "Int16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x32.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x32.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x16.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, 
"Uint32x16.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x16.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x16.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x16.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x16.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args 
[]*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int16x8.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float32x4.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, 
sys.AMD64) - addF(simdPackage, "Uint64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Float64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, 
"Int64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x16.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x4.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x4.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x4.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Uint32x4.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Uint32x4.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt16x16", func(s *state, n *ir.CallExpr, 
args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint32x8.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x2.AsUint8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x4.AsUint8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return 
args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint64x8.AsUint8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsFloat32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsFloat64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x16.AsUint64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsFloat32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsFloat64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x32.AsUint64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsFloat32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, 
"Uint8x64.AsFloat64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Uint8x64.AsUint64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "LoadInt16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Int16x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt32x4", simdLoad(), sys.AMD64) addF(simdPackage, "Int32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt8x16", simdLoad(), sys.AMD64) addF(simdPackage, "Int8x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint16x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt16x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Int16x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Int64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x2.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat32x4", simdLoad(), sys.AMD64) addF(simdPackage, "Float32x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint64x2.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat64x2", simdLoad(), sys.AMD64) addF(simdPackage, "Float64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt64x2", simdLoad(), sys.AMD64) - addF(simdPackage, "Int64x2.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint8x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint8x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint16x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint32x4", simdLoad(), sys.AMD64) addF(simdPackage, "Uint32x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint64x2", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x2.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x4.Store", 
simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt16x16", simdLoad(), sys.AMD64) addF(simdPackage, "Int16x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt32x8", simdLoad(), sys.AMD64) addF(simdPackage, "Int32x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint64x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint64x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x4.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt8x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Int8x32.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt64x4", simdLoad(), sys.AMD64) addF(simdPackage, "Int64x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadFloat64x4", simdLoad(), sys.AMD64) - addF(simdPackage, "Float64x4.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat32x8", simdLoad(), sys.AMD64) addF(simdPackage, "Float32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Float64x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint16x16", simdLoad(), sys.AMD64) addF(simdPackage, "Uint16x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt8x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Int8x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint32x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint64x4", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x4.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint8x32", simdLoad(), sys.AMD64) addF(simdPackage, "Uint8x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadMask8x32", simdLoad(), sys.AMD64) addF(simdPackage, "Mask8x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint32x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint32x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadFloat64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Float64x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Int16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadInt32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Int32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt64x8", 
simdLoad(), sys.AMD64) addF(simdPackage, "Int64x8.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadInt8x64", simdLoad(), sys.AMD64) addF(simdPackage, "Int8x64.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadMask8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Mask8x64.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadFloat32x16", simdLoad(), sys.AMD64) addF(simdPackage, "Float32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt32x16", simdLoad(), sys.AMD64) - addF(simdPackage, "Int32x16.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadFloat64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Float64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint16x32", simdLoad(), sys.AMD64) addF(simdPackage, "Uint16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadMask16x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Mask16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadInt16x32", simdLoad(), sys.AMD64) - addF(simdPackage, "Int16x32.Store", simdStore(), sys.AMD64) - addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) - addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) addF(simdPackage, "LoadUint32x16", simdLoad(), sys.AMD64) addF(simdPackage, "Uint32x16.Store", simdStore(), sys.AMD64) - addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x4.AsMask64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "LoadUint64x8", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint64x8.Store", simdStore(), sys.AMD64) + addF(simdPackage, "LoadUint8x64", simdLoad(), sys.AMD64) + addF(simdPackage, "Uint8x64.Store", simdStore(), sys.AMD64) addF(simdPackage, "Mask16x16.AsInt16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x16.AsMask16x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x16.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask16x16.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n 
*ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) - addF(simdPackage, "Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask16x32.AsInt16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x32.AsMask16x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x32.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) addF(simdPackage, "Mask16x32.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) - addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) - addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) - addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask16x8.AsInt16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int16x8.AsMask16x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask16x8.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask16x8.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask32x16.AsInt32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, 
"Int32x16.AsMask32x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask32x16.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask32x16.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask32x4.AsInt32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x4.AsMask32x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask32x4.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask32x4.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask32x8.AsInt32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int32x8.AsMask32x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask32x8.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask32x8.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) addF(simdPackage, "Mask64x2.AsInt64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Int64x2.AsMask64x2", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) addF(simdPackage, "Mask64x2.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) addF(simdPackage, "Mask64x2.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask64x4.AsInt64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x4.AsMask64x4", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask64x4.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask64x4.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask64x8.AsInt64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int64x8.AsMask64x8", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask64x8.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask64x8.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask8x16.AsInt8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x16.AsMask8x16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x16.And", opLen2(ssa.OpAndInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask8x16.Or", opLen2(ssa.OpOrInt32x4, types.TypeVec128), sys.AMD64) + addF(simdPackage, "Mask8x32.AsInt8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Int8x32.AsMask8x32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x32.And", opLen2(ssa.OpAndInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask8x32.Or", opLen2(ssa.OpOrInt32x8, types.TypeVec256), sys.AMD64) + addF(simdPackage, "Mask8x64.AsInt8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, 
sys.AMD64) + addF(simdPackage, "Int8x64.AsMask8x64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return args[0] }, sys.AMD64) + addF(simdPackage, "Mask8x64.And", opLen2(ssa.OpAndInt32x16, types.TypeVec512), sys.AMD64) + addF(simdPackage, "Mask8x64.Or", opLen2(ssa.OpOrInt32x16, types.TypeVec512), sys.AMD64) } func opLen1(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { @@ -1505,6 +1505,76 @@ func opLen3(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa } } +func opLen4(op ssa.Op, t *types.Type) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return s.newValue4(op, t, args[0], args[1], args[2], args[3]) + } +} + +func plainPanicSimdImm(s *state) { + cmp := s.newValue0(ssa.OpConstBool, types.Types[types.TBOOL]) + cmp.AuxInt = 1 + // TODO: make this a standalone panic instead of reusing the overflow panic. + // Or maybe after we implement the switch table this will be obsolete anyway. + s.check(cmp, ir.Syms.Panicoverflow) +} + +func opLen1Imm8(op ssa.Op, t *types.Type, offset int) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { + if args[1].Op == ssa.OpConst8 { + return s.newValue1I(op, t, args[1].AuxInt<