From: Keith Randall Date: Thu, 4 Aug 2022 05:58:30 +0000 (-0700) Subject: cmd/compile: move SSA rotate instruction detection to arch-independent rules X-Git-Tag: go1.20rc1~1441 X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=60ad3c48f59c35981dd872ed5dfe74e4d6becab2;p=gostls13.git cmd/compile: move SSA rotate instruction detection to arch-independent rules Detect rotate instructions while still in architecture-independent form. It's easier to do here, and we don't need to repeat it in each architecture file. Change-Id: I9396954b3f3b3bfb96c160d064a02002309935bb Reviewed-on: https://go-review.googlesource.com/c/go/+/421195 TryBot-Result: Gopher Robot Reviewed-by: Eric Fang Reviewed-by: Cuong Manh Le Reviewed-by: David Chase Reviewed-by: Joedian Reid Reviewed-by: Ruinan Sun Run-TryBot: Keith Randall --- diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules index 7bdebedafe..0a84ba2301 100644 --- a/src/cmd/compile/internal/ssa/gen/386.rules +++ b/src/cmd/compile/internal/ssa/gen/386.rules @@ -148,10 +148,14 @@ (Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 => (SARWconst x [15]) (Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 => (SARBconst x [7]) +// rotates +(RotateLeft32 ...) => (ROLL ...) +(RotateLeft16 ...) => (ROLW ...) +(RotateLeft8 ...) => (ROLB ...) // constant rotates -(RotateLeft32 x (MOVLconst [c])) => (ROLLconst [c&31] x) -(RotateLeft16 x (MOVLconst [c])) => (ROLWconst [int16(c&15)] x) -(RotateLeft8 x (MOVLconst [c])) => (ROLBconst [int8(c&7)] x) +(ROLL x (MOVLconst [c])) => (ROLLconst [c&31] x) +(ROLW x (MOVLconst [c])) => (ROLWconst [int16(c&15)] x) +(ROLB x (MOVLconst [c])) => (ROLBconst [int8(c&7)] x) // Lowering comparisons (Less32 x y) => (SETL (CMPL x y)) @@ -425,29 +429,10 @@ // Rotate instructions -(ADDL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c => (ROLLconst [c] x) -( ORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c => (ROLLconst [c] x) -(XORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c => (ROLLconst [c] x) - -(ADDL (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == int16(16-c) && t.Size() == 2 - => (ROLWconst x [int16(c)]) -( ORL (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == int16(16-c) && t.Size() == 2 - => (ROLWconst x [int16(c)]) -(XORL (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == int16(16-c) && t.Size() == 2 - => (ROLWconst x [int16(c)]) - -(ADDL (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == int8(8-c) && t.Size() == 1 - => (ROLBconst x [int8(c)]) -( ORL (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == int8(8-c) && t.Size() == 1 - => (ROLBconst x [int8(c)]) -(XORL (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == int8(8-c) && t.Size() == 1 - => (ROLBconst x [int8(c)]) - (ROLLconst [c] (ROLLconst [d] x)) => (ROLLconst [(c+d)&31] x) (ROLWconst [c] (ROLWconst [d] x)) => (ROLWconst [(c+d)&15] x) (ROLBconst [c] (ROLBconst [d] x)) => (ROLBconst [(c+d)& 7] x) - // Constant shift simplifications (SHLLconst x [0]) => x diff --git a/src/cmd/compile/internal/ssa/gen/386Ops.go b/src/cmd/compile/internal/ssa/gen/386Ops.go index 8ec9c68d7f..88e061151e 100644 --- a/src/cmd/compile/internal/ssa/gen/386Ops.go +++ b/src/cmd/compile/internal/ssa/gen/386Ops.go @@ -275,6 +275,9 @@ func init() { {name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-15 {name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> 
auxint, shift amount 0-7 + {name: "ROLL", argLength: 2, reg: gp21shift, asm: "ROLL", resultInArg0: true, clobberFlags: true}, // 32 bits of arg0 rotate left by arg1 + {name: "ROLW", argLength: 2, reg: gp21shift, asm: "ROLW", resultInArg0: true, clobberFlags: true}, // low 16 bits of arg0 rotate left by arg1 + {name: "ROLB", argLength: 2, reg: gp21shift, asm: "ROLB", resultInArg0: true, clobberFlags: true}, // low 8 bits of arg0 rotate left by arg1 {name: "ROLLconst", argLength: 1, reg: gp11, asm: "ROLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-31 {name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15 {name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7 diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 6a96b2d61e..e66718d70e 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -853,86 +853,11 @@ ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (ANDLconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x y) ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGL (ANDLconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL|SHLXL|SHRXL|SARXL) x (NEGL y)) -// Constant rotate instructions -((ADDQ|ORQ|XORQ) (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c => (ROLQconst x [c]) -((ADDL|ORL|XORL) (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c => (ROLLconst x [c]) - -((ADDL|ORL|XORL) (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 => (ROLWconst x [c]) -((ADDL|ORL|XORL) (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.Size() == 1 => (ROLBconst x [c]) - -(ROLQconst [c] (ROLQconst [d] x)) => (ROLQconst [(c+d)&63] x) -(ROLLconst [c] (ROLLconst [d] x)) => (ROLLconst [(c+d)&31] x) -(ROLWconst [c] (ROLWconst [d] x)) => (ROLWconst [(c+d)&15] x) -(ROLBconst [c] (ROLBconst [d] x)) => (ROLBconst [(c+d)& 7] x) - (RotateLeft8 ...) => (ROLB ...) (RotateLeft16 ...) => (ROLW ...) (RotateLeft32 ...) => (ROLL ...) (RotateLeft64 ...) => (ROLQ ...) -// Non-constant rotates. -// We want to issue a rotate when the Go source contains code like -// y &= 63 -// x << y | x >> (64-y) -// The shift rules above convert << to SHLx and >> to SHRx. -// SHRx converts its shift argument from 64-y to -y. -// A tricky situation occurs when y==0. Then the original code would be: -// x << 0 | x >> 64 -// But x >> 64 is 0, not x. So there's an additional mask that is ANDed in -// to force the second term to 0. We don't need that mask, but we must match -// it in order to strip it out. 
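For reference, the source-level idiom that the comment above describes, and that the removed rules below match, looks like the following minimal Go sketch (the helper name is illustrative, not from this CL):

    // rotl64 is an illustrative helper. y &= 63 bounds the left shift;
    // when y == 0 the right-hand term is x>>64, which is exactly the case
    // the extra AND/carry-mask in the matched pattern exists to force to 0.
    func rotl64(x uint64, y uint) uint64 {
        y &= 63
        return x<<y | x>>(64-y)
    }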
-(ORQ (SHLQ x y) (ANDQ (SHRQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (ROLQ x y) -(ORQ (SHRQ x y) (ANDQ (SHLQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (RORQ x y) -(ORQ (SHLXQ x y) (ANDQ (SHRXQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (ROLQ x y) -(ORQ (SHRXQ x y) (ANDQ (SHLXQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (RORQ x y) - -(ORL (SHLL x y) (ANDL (SHRL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (ROLL x y) -(ORL (SHRL x y) (ANDL (SHLL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (RORL x y) -(ORL (SHLXL x y) (ANDL (SHRXL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (ROLL x y) -(ORL (SHRXL x y) (ANDL (SHLXL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (RORL x y) - -// Help with rotate detection -(CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) => (FlagLT_ULT) -(CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32]) => (FlagLT_ULT) - -(ORL (SHLL x (AND(Q|L)const y [15])) - (ANDL (SHRW x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16]))) - (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])) [16])))) - && v.Type.Size() == 2 - => (ROLW x y) -(ORL (SHRW x (AND(Q|L)const y [15])) - (SHLL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])))) - && v.Type.Size() == 2 - => (RORW x y) -(ORL (SHLXL x (AND(Q|L)const y [15])) - (ANDL (SHRW x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16]))) - (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])) [16])))) - && v.Type.Size() == 2 - => (ROLW x y) -(ORL (SHRW x (AND(Q|L)const y [15])) - (SHLXL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])))) - && v.Type.Size() == 2 - => (RORW x y) - -(ORL (SHLL x (AND(Q|L)const y [ 7])) - (ANDL (SHRB x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8]))) - (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])) [ 8])))) - && v.Type.Size() == 1 - => (ROLB x y) -(ORL (SHRB x (AND(Q|L)const y [ 7])) - (SHLL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])))) - && v.Type.Size() == 1 - => (RORB x y) -(ORL (SHLXL x (AND(Q|L)const y [ 7])) - (ANDL (SHRB x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8]))) - (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])) [ 8])))) - && v.Type.Size() == 1 - => (ROLB x y) -(ORL (SHRB x (AND(Q|L)const y [ 7])) - (SHLXL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])))) - && v.Type.Size() == 1 - => (RORB x y) - // rotate left negative = rotate right (ROLQ x (NEG(Q|L) y)) => (RORQ x y) (ROLL x (NEG(Q|L) y)) => (RORL x y) diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules index 7328461972..e5898b0369 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM.rules @@ -1130,14 +1130,6 @@ (CMNshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftRL x y [c]) (CMNshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftRA x y [c]) -// 
Generate rotates -(ADDshiftLL [c] (SRLconst x [32-c]) x) => (SRRconst [32-c] x) -( ORshiftLL [c] (SRLconst x [32-c]) x) => (SRRconst [32-c] x) -(XORshiftLL [c] (SRLconst x [32-c]) x) => (SRRconst [32-c] x) -(ADDshiftRL [c] (SLLconst x [32-c]) x) => (SRRconst [ c] x) -( ORshiftRL [c] (SLLconst x [32-c]) x) => (SRRconst [ c] x) -(XORshiftRL [c] (SLLconst x [32-c]) x) => (SRRconst [ c] x) - (RotateLeft16 x (MOVWconst [c])) => (Or16 (Lsh16x32 x (MOVWconst [c&15])) (Rsh16Ux32 x (MOVWconst [-c&15]))) (RotateLeft8 x (MOVWconst [c])) => (Or8 (Lsh8x32 x (MOVWconst [c&7])) (Rsh8Ux32 x (MOVWconst [-c&7]))) (RotateLeft32 x y) => (SRR x (RSBconst [0] y)) diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules index 3cbfea83cf..076438edb7 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM64.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules @@ -1820,56 +1820,9 @@ (ORNshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1]) (ORNshiftRO (RORconst x [c]) x [c]) => (MOVDconst [-1]) -// Generate rotates with const shift -(ADDshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x) -( ORshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x) -(XORshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x) -(ADDshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [ c] x) -( ORshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [ c] x) -(XORshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [ c] x) - -(ADDshiftLL [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) - => (RORWconst [32-c] x) -( ORshiftLL [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) - => (RORWconst [32-c] x) -(XORshiftLL [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) - => (RORWconst [32-c] x) -(ADDshiftRL [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x) -( ORshiftRL [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x) -(XORshiftRL [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x) - (RORconst [c] (RORconst [d] x)) => (RORconst [(c+d)&63] x) (RORWconst [c] (RORWconst [d] x)) => (RORWconst [(c+d)&31] x) -// Generate rotates with non-const shift. -// These rules match the Go source code like -// y &= 63 -// x << y | x >> (64-y) -// "|" can also be "^" or "+". -// As arm64 does not have a ROL instruction, so ROL(x, y) is replaced by ROR(x, -y). -((ADD|OR|XOR) (SLL x (ANDconst [63] y)) - (CSEL0 [cc] (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) - (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) && cc == OpARM64LessThanU - => (ROR x (NEG y)) -((ADD|OR|XOR) (SRL x (ANDconst [63] y)) - (CSEL0 [cc] (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) - (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) && cc == OpARM64LessThanU - => (ROR x y) - -// These rules match the Go source code like -// y &= 31 -// x << y | x >> (32-y) -// "|" can also be "^" or "+". -// As arm64 does not have a ROLW instruction, so ROLW(x, y) is replaced by RORW(x, -y). 
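The identity behind replacing a rotate-left with a rotate-right by the negated amount can be sanity-checked with ordinary Go shifts (illustrative helpers, not code from this CL):

    // rol32 and ror32 spell out 32-bit rotates with plain shifts.
    func rol32(x uint32, y uint) uint32 { return x<<(y&31) | x>>((32-y)&31) }
    func ror32(x uint32, y uint) uint32 { return x>>(y&31) | x<<((32-y)&31) }
    // rol32(x, y) == ror32(x, (32-y)&31) for all x and y, which is why
    // ROLW(x, y) can be emitted as RORW(x, -y) on arm64.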
-((ADD|OR|XOR) (SLL x (ANDconst [31] y)) - (CSEL0 [cc] (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) - (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) && cc == OpARM64LessThanU - => (RORW x (NEG y)) -((ADD|OR|XOR) (SRL (MOVWUreg x) (ANDconst [31] y)) - (CSEL0 [cc] (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) - (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) && cc == OpARM64LessThanU - => (RORW x y) - // rev16w | rev16 // ((x>>8) | (x<<8)) => (REV16W x), the type of x is uint16, "|" can also be "^" or "+". ((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (UBFX [armBFAuxInt(8, 8)] x) x) => (REV16W x) diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules index 859bf0aa0c..55e59f23a1 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules @@ -128,39 +128,8 @@ // Rotates (RotateLeft8 x (MOVDconst [c])) => (Or8 (Lsh8x64 x (MOVDconst [c&7])) (Rsh8Ux64 x (MOVDconst [-c&7]))) (RotateLeft16 x (MOVDconst [c])) => (Or16 (Lsh16x64 x (MOVDconst [c&15])) (Rsh16Ux64 x (MOVDconst [-c&15]))) -(RotateLeft32 x (MOVDconst [c])) => (ROTLWconst [c&31] x) -(RotateLeft64 x (MOVDconst [c])) => (ROTLconst [c&63] x) - -// Rotate generation with const shift -(ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (ROTLconst [c] x) -( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (ROTLconst [c] x) -(XOR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (ROTLconst [c] x) - -(ADD (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (ROTLWconst [c] x) -( OR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (ROTLWconst [c] x) -(XOR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (ROTLWconst [c] x) - -// Rotate generation with non-const shift -// these match patterns from math/bits/RotateLeft[32|64], but there could be others -(ADD (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y))))) => (ROTL x y) -(ADD (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y))))) => (ROTL x y) -( OR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))))=> (ROTL x y) -( OR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y))))) => (ROTL x y) -(XOR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y))))) => (ROTL x y) -(XOR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y))))) => (ROTL x y) - - -(ADD (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y))))) => (ROTLW x y) -(ADD (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y))))) => (ROTLW x y) -( OR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y))))) => (ROTLW x y) -( OR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y))))) => (ROTLW x y) -(XOR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y))))) => (ROTLW x y) -(XOR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y))))) => (ROTLW x y) - - -// Lowering rotates -(RotateLeft32 x y) => (ROTLW x y) -(RotateLeft64 x y) => (ROTL x y) +(RotateLeft32 ...) => (ROTLW ...) +(RotateLeft64 ...) => (ROTL ...) 
// Constant rotate generation (ROTLW x (MOVDconst [c])) => (ROTLWconst x [c&31]) diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules index b3928c6a1e..8c48d6f601 100644 --- a/src/cmd/compile/internal/ssa/gen/S390X.rules +++ b/src/cmd/compile/internal/ssa/gen/S390X.rules @@ -691,10 +691,6 @@ (RLLG x (MOVDconst [c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, uint8(c&63))}) (RLL x (MOVDconst [c])) => (RLLconst x [uint8(c&31)]) -// Match rotate by constant pattern. -((ADD|OR|XOR) (SLDconst x [c]) (SRDconst x [64-c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, c)}) -((ADD|OR|XOR)W (SLWconst x [c]) (SRWconst x [32-c])) => (RLLconst x [c]) - // Signed 64-bit comparison with immediate. (CMP x (MOVDconst [c])) && is32Bit(c) => (CMPconst x [int32(c)]) (CMP (MOVDconst [c]) x) && is32Bit(c) => (InvertFlags (CMPconst x [int32(c)])) diff --git a/src/cmd/compile/internal/ssa/gen/dec64.rules b/src/cmd/compile/internal/ssa/gen/dec64.rules index b0f10d0a0f..ba776af1a7 100644 --- a/src/cmd/compile/internal/ssa/gen/dec64.rules +++ b/src/cmd/compile/internal/ssa/gen/dec64.rules @@ -217,6 +217,11 @@ (Rsh8x64 x y) => (Rsh8x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) (Rsh8Ux64 x y) => (Rsh8Ux32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) +(RotateLeft64 x (Int64Make hi lo)) => (RotateLeft64 x lo) +(RotateLeft32 x (Int64Make hi lo)) => (RotateLeft32 x lo) +(RotateLeft16 x (Int64Make hi lo)) => (RotateLeft16 x lo) +(RotateLeft8 x (Int64Make hi lo)) => (RotateLeft8 x lo) + // Clean up constants a little (Or32 (Zeromask (Const32 [c])) y) && c == 0 => y (Or32 (Zeromask (Const32 [c])) y) && c != 0 => (Const32 [-1]) diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules index 744cc839f4..e56ee4a6f1 100644 --- a/src/cmd/compile/internal/ssa/gen/generic.rules +++ b/src/cmd/compile/internal/ssa/gen/generic.rules @@ -2551,3 +2551,83 @@ // Elide self-moves. This only happens rarely (e.g test/fixedbugs/bug277.go). // However, this rule is needed to prevent the previous rule from looping forever in such cases. (Move dst src mem) && isSamePtr(dst, src) => mem + +// Constant rotate detection. +((Add64|Or64|Xor64) (Lsh64x64 x z:(Const64 [c])) (Rsh64Ux64 x (Const64 [d]))) && c < 64 && d == 64-c && canRotate(config, 64) => (RotateLeft64 x z) +((Add32|Or32|Xor32) (Lsh32x64 x z:(Const64 [c])) (Rsh32Ux64 x (Const64 [d]))) && c < 32 && d == 32-c && canRotate(config, 32) => (RotateLeft32 x z) +((Add16|Or16|Xor16) (Lsh16x64 x z:(Const64 [c])) (Rsh16Ux64 x (Const64 [d]))) && c < 16 && d == 16-c && canRotate(config, 16) => (RotateLeft16 x z) +((Add8|Or8|Xor8) (Lsh8x64 x z:(Const64 [c])) (Rsh8Ux64 x (Const64 [d]))) && c < 8 && d == 8-c && canRotate(config, 8) => (RotateLeft8 x z) + +// Non-constant rotate detection. +// We use shiftIsBounded to make sure that neither of the shifts are >64. +// Note: these rules are subtle when the shift amounts are 0/64, as Go shifts +// are different from most native shifts. But it works out. 
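One concrete boundary case behind the "But it works out" note, using ordinary Go shift semantics (not code from this CL): when the rotate amount is 0, the right-hand shift is by the full width, which Go evaluates to 0 for unsigned operands, so the OR still yields the correct rotate-by-0 result.

    // With y == 0 the expression is x<<0 | x>>64; Go evaluates x>>64 to 0
    // for a uint64, so the result is x, i.e. a rotate by 0.
    func rotl64ZeroCase(x uint64) uint64 {
        y := uint(0)
        return x<<y | x>>(64-y)
    }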
+((Add64|Or64|Xor64) left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y) +((Add64|Or64|Xor64) left:(Lsh64x32 x y) right:(Rsh64Ux32 x (Sub32 (Const32 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y) +((Add64|Or64|Xor64) left:(Lsh64x16 x y) right:(Rsh64Ux16 x (Sub16 (Const16 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y) +((Add64|Or64|Xor64) left:(Lsh64x8 x y) right:(Rsh64Ux8 x (Sub8 (Const8 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y) + +((Add64|Or64|Xor64) right:(Rsh64Ux64 x y) left:(Lsh64x64 x z:(Sub64 (Const64 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z) +((Add64|Or64|Xor64) right:(Rsh64Ux32 x y) left:(Lsh64x32 x z:(Sub32 (Const32 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z) +((Add64|Or64|Xor64) right:(Rsh64Ux16 x y) left:(Lsh64x16 x z:(Sub16 (Const16 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z) +((Add64|Or64|Xor64) right:(Rsh64Ux8 x y) left:(Lsh64x8 x z:(Sub8 (Const8 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z) + +((Add32|Or32|Xor32) left:(Lsh32x64 x y) right:(Rsh32Ux64 x (Sub64 (Const64 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y) +((Add32|Or32|Xor32) left:(Lsh32x32 x y) right:(Rsh32Ux32 x (Sub32 (Const32 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y) +((Add32|Or32|Xor32) left:(Lsh32x16 x y) right:(Rsh32Ux16 x (Sub16 (Const16 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y) +((Add32|Or32|Xor32) left:(Lsh32x8 x y) right:(Rsh32Ux8 x (Sub8 (Const8 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y) + +((Add32|Or32|Xor32) right:(Rsh32Ux64 x y) left:(Lsh32x64 x z:(Sub64 (Const64 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z) +((Add32|Or32|Xor32) right:(Rsh32Ux32 x y) left:(Lsh32x32 x z:(Sub32 (Const32 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z) +((Add32|Or32|Xor32) right:(Rsh32Ux16 x y) left:(Lsh32x16 x z:(Sub16 (Const16 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z) +((Add32|Or32|Xor32) right:(Rsh32Ux8 x y) left:(Lsh32x8 x z:(Sub8 (Const8 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z) + +((Add16|Or16|Xor16) left:(Lsh16x64 x y) right:(Rsh16Ux64 x (Sub64 (Const64 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y) +((Add16|Or16|Xor16) left:(Lsh16x32 x y) right:(Rsh16Ux32 x (Sub32 (Const32 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y) +((Add16|Or16|Xor16) left:(Lsh16x16 x y) right:(Rsh16Ux16 x (Sub16 (Const16 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y) +((Add16|Or16|Xor16) 
left:(Lsh16x8 x y) right:(Rsh16Ux8 x (Sub8 (Const8 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y) + +((Add16|Or16|Xor16) right:(Rsh16Ux64 x y) left:(Lsh16x64 x z:(Sub64 (Const64 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z) +((Add16|Or16|Xor16) right:(Rsh16Ux32 x y) left:(Lsh16x32 x z:(Sub32 (Const32 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z) +((Add16|Or16|Xor16) right:(Rsh16Ux16 x y) left:(Lsh16x16 x z:(Sub16 (Const16 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z) +((Add16|Or16|Xor16) right:(Rsh16Ux8 x y) left:(Lsh16x8 x z:(Sub8 (Const8 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z) + +((Add8|Or8|Xor8) left:(Lsh8x64 x y) right:(Rsh8Ux64 x (Sub64 (Const64 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y) +((Add8|Or8|Xor8) left:(Lsh8x32 x y) right:(Rsh8Ux32 x (Sub32 (Const32 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y) +((Add8|Or8|Xor8) left:(Lsh8x16 x y) right:(Rsh8Ux16 x (Sub16 (Const16 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y) +((Add8|Or8|Xor8) left:(Lsh8x8 x y) right:(Rsh8Ux8 x (Sub8 (Const8 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y) + +((Add8|Or8|Xor8) right:(Rsh8Ux64 x y) left:(Lsh8x64 x z:(Sub64 (Const64 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z) +((Add8|Or8|Xor8) right:(Rsh8Ux32 x y) left:(Lsh8x32 x z:(Sub32 (Const32 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z) +((Add8|Or8|Xor8) right:(Rsh8Ux16 x y) left:(Lsh8x16 x z:(Sub16 (Const16 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z) +((Add8|Or8|Xor8) right:(Rsh8Ux8 x y) left:(Lsh8x8 x z:(Sub8 (Const8 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z) + +// Rotating by y&c, with c a mask that doesn't change the bottom bits, is the same as rotating by y. +(RotateLeft64 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&63 == 63 => (RotateLeft64 x y) +(RotateLeft32 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&31 == 31 => (RotateLeft32 x y) +(RotateLeft16 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&15 == 15 => (RotateLeft16 x y) +(RotateLeft8 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&7 == 7 => (RotateLeft8 x y) + +// Rotating by -(y&c), with c a mask that doesn't change the bottom bits, is the same as rotating by -y. +(RotateLeft64 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&63 == 63 => (RotateLeft64 x (Neg(64|32|16|8) y)) +(RotateLeft32 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&31 == 31 => (RotateLeft32 x (Neg(64|32|16|8) y)) +(RotateLeft16 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&15 == 15 => (RotateLeft16 x (Neg(64|32|16|8) y)) +(RotateLeft8 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&7 == 7 => (RotateLeft8 x (Neg(64|32|16|8) y)) + +// Rotating by y+c, with c a multiple of the value width, is the same as rotating by y. 
+(RotateLeft64 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&63 == 0 => (RotateLeft64 x y) +(RotateLeft32 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&31 == 0 => (RotateLeft32 x y) +(RotateLeft16 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&15 == 0 => (RotateLeft16 x y) +(RotateLeft8 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&7 == 0 => (RotateLeft8 x y) + +// Rotating by c-y, with c a multiple of the value width, is the same as rotating by -y. +(RotateLeft64 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&63 == 0 => (RotateLeft64 x (Neg(64|32|16|8) y)) +(RotateLeft32 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&31 == 0 => (RotateLeft32 x (Neg(64|32|16|8) y)) +(RotateLeft16 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&15 == 0 => (RotateLeft16 x (Neg(64|32|16|8) y)) +(RotateLeft8 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&7 == 0 => (RotateLeft8 x (Neg(64|32|16|8) y)) + +// Ensure we don't do Const64 rotates in a 32-bit system. +(RotateLeft64 x (Const64 [c])) && config.PtrSize == 4 => (RotateLeft64 x (Const32 [int32(c)])) +(RotateLeft32 x (Const64 [c])) && config.PtrSize == 4 => (RotateLeft32 x (Const32 [int32(c)])) +(RotateLeft16 x (Const64 [c])) && config.PtrSize == 4 => (RotateLeft16 x (Const32 [int32(c)])) +(RotateLeft8 x (Const64 [c])) && config.PtrSize == 4 => (RotateLeft8 x (Const32 [int32(c)])) diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go index eb508afe30..d09b9aab75 100644 --- a/src/cmd/compile/internal/ssa/gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/gen/genericOps.go @@ -249,14 +249,19 @@ var genericOps = []opData{ {name: "BitRev32", argLength: 1}, // Reverse the bits in arg[0] {name: "BitRev64", argLength: 1}, // Reverse the bits in arg[0] - {name: "PopCount8", argLength: 1}, // Count bits in arg[0] - {name: "PopCount16", argLength: 1}, // Count bits in arg[0] - {name: "PopCount32", argLength: 1}, // Count bits in arg[0] - {name: "PopCount64", argLength: 1}, // Count bits in arg[0] - {name: "RotateLeft8", argLength: 2}, // Rotate bits in arg[0] left by arg[1] - {name: "RotateLeft16", argLength: 2}, // Rotate bits in arg[0] left by arg[1] - {name: "RotateLeft32", argLength: 2}, // Rotate bits in arg[0] left by arg[1] - {name: "RotateLeft64", argLength: 2}, // Rotate bits in arg[0] left by arg[1] + {name: "PopCount8", argLength: 1}, // Count bits in arg[0] + {name: "PopCount16", argLength: 1}, // Count bits in arg[0] + {name: "PopCount32", argLength: 1}, // Count bits in arg[0] + {name: "PopCount64", argLength: 1}, // Count bits in arg[0] + + // RotateLeftX instructions rotate the X bits of arg[0] to the left + // by the low lg_2(X) bits of arg[1], interpreted as an unsigned value. + // Note that this works out regardless of the bit width or signedness of + // arg[1]. In particular, RotateLeft by x is the same as RotateRight by -x. + {name: "RotateLeft64", argLength: 2}, + {name: "RotateLeft32", argLength: 2}, + {name: "RotateLeft16", argLength: 2}, + {name: "RotateLeft8", argLength: 2}, // Square root. 
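The amount identities relied on by the rules above and by the RotateLeftX comment (amounts are taken modulo the value width, and a negative amount rotates the other way) can be observed directly with math/bits; this is a standalone illustration, not code from this CL:

    package main

    import (
        "fmt"
        "math/bits"
    )

    func main() {
        var x uint32 = 0x80000001
        // Rotating left by a negative amount is rotating right.
        fmt.Println(bits.RotateLeft32(x, -1) == bits.RotateLeft32(x, 31)) // true
        // Adding a multiple of the width to the amount changes nothing.
        fmt.Println(bits.RotateLeft32(x, 5) == bits.RotateLeft32(x, 5+32)) // true
    }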
// Special cases: diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index c156d51ac0..2e94f2e624 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -434,6 +434,9 @@ const ( Op386SARLconst Op386SARWconst Op386SARBconst + Op386ROLL + Op386ROLW + Op386ROLB Op386ROLLconst Op386ROLWconst Op386ROLBconst @@ -2963,10 +2966,10 @@ const ( OpPopCount16 OpPopCount32 OpPopCount64 - OpRotateLeft8 - OpRotateLeft16 - OpRotateLeft32 OpRotateLeft64 + OpRotateLeft32 + OpRotateLeft16 + OpRotateLeft8 OpSqrt OpSqrt32 OpFloor @@ -4636,6 +4639,54 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "ROLL", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.AROLL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ROLW", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.AROLW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "ROLB", + argLen: 2, + resultInArg0: true, + clobberFlags: true, + asm: x86.AROLB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 2}, // CX + {0, 239}, // AX CX DX BX BP SI DI + }, + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, { name: "ROLLconst", auxType: auxInt32, @@ -38457,22 +38508,22 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "RotateLeft8", + name: "RotateLeft64", argLen: 2, generic: true, }, { - name: "RotateLeft16", + name: "RotateLeft32", argLen: 2, generic: true, }, { - name: "RotateLeft32", + name: "RotateLeft16", argLen: 2, generic: true, }, { - name: "RotateLeft64", + name: "RotateLeft8", argLen: 2, generic: true, }, diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 13eb86ade1..91f8fd7f72 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -1977,3 +1977,20 @@ func makeJumpTableSym(b *Block) *obj.LSym { s.Set(obj.AttrLocal, true) return s } + +// canRotate reports whether the architecture supports +// rotates of integer registers with the given number of bits. +func canRotate(c *Config, bits int64) bool { + if bits > c.PtrSize*8 { + // Don't rewrite to rotates bigger than the machine word. 
+ return false + } + switch c.arch { + case "386", "amd64": + return true + case "arm", "arm64", "s390x", "ppc64", "ppc64le", "wasm": + return bits >= 32 + default: + return false + } +} diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go index 34f37867cf..8372e72600 100644 --- a/src/cmd/compile/internal/ssa/rewrite386.go +++ b/src/cmd/compile/internal/ssa/rewrite386.go @@ -146,10 +146,16 @@ func rewriteValue386(v *Value) bool { return rewriteValue386_Op386ORLload(v) case Op386ORLmodify: return rewriteValue386_Op386ORLmodify(v) + case Op386ROLB: + return rewriteValue386_Op386ROLB(v) case Op386ROLBconst: return rewriteValue386_Op386ROLBconst(v) + case Op386ROLL: + return rewriteValue386_Op386ROLL(v) case Op386ROLLconst: return rewriteValue386_Op386ROLLconst(v) + case Op386ROLW: + return rewriteValue386_Op386ROLW(v) case Op386ROLWconst: return rewriteValue386_Op386ROLWconst(v) case Op386SARB: @@ -541,11 +547,14 @@ func rewriteValue386(v *Value) bool { case OpPanicExtend: return rewriteValue386_OpPanicExtend(v) case OpRotateLeft16: - return rewriteValue386_OpRotateLeft16(v) + v.Op = Op386ROLW + return true case OpRotateLeft32: - return rewriteValue386_OpRotateLeft32(v) + v.Op = Op386ROLL + return true case OpRotateLeft8: - return rewriteValue386_OpRotateLeft8(v) + v.Op = Op386ROLB + return true case OpRound32F: v.Op = OpCopy return true @@ -734,80 +743,6 @@ func rewriteValue386_Op386ADDL(v *Value) bool { } break } - // match: (ADDL (SHLLconst [c] x) (SHRLconst [d] x)) - // cond: d == 32-c - // result: (ROLLconst [c] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != Op386SHLLconst { - continue - } - c := auxIntToInt32(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != Op386SHRLconst { - continue - } - d := auxIntToInt32(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 32-c) { - continue - } - v.reset(Op386ROLLconst) - v.AuxInt = int32ToAuxInt(c) - v.AddArg(x) - return true - } - break - } - // match: (ADDL (SHLLconst x [c]) (SHRWconst x [d])) - // cond: c < 16 && d == int16(16-c) && t.Size() == 2 - // result: (ROLWconst x [int16(c)]) - for { - t := v.Type - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != Op386SHLLconst { - continue - } - c := auxIntToInt32(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != Op386SHRWconst { - continue - } - d := auxIntToInt16(v_1.AuxInt) - if x != v_1.Args[0] || !(c < 16 && d == int16(16-c) && t.Size() == 2) { - continue - } - v.reset(Op386ROLWconst) - v.AuxInt = int16ToAuxInt(int16(c)) - v.AddArg(x) - return true - } - break - } - // match: (ADDL (SHLLconst x [c]) (SHRBconst x [d])) - // cond: c < 8 && d == int8(8-c) && t.Size() == 1 - // result: (ROLBconst x [int8(c)]) - for { - t := v.Type - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != Op386SHLLconst { - continue - } - c := auxIntToInt32(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != Op386SHRBconst { - continue - } - d := auxIntToInt8(v_1.AuxInt) - if x != v_1.Args[0] || !(c < 8 && d == int8(8-c) && t.Size() == 1) { - continue - } - v.reset(Op386ROLBconst) - v.AuxInt = int8ToAuxInt(int8(c)) - v.AddArg(x) - return true - } - break - } // match: (ADDL x (SHLLconst [3] y)) // result: (LEAL8 x y) for { @@ -6305,80 +6240,6 @@ func rewriteValue386_Op386ORL(v *Value) bool { } break } - // match: ( ORL (SHLLconst [c] x) (SHRLconst [d] x)) - // cond: d == 32-c - // result: (ROLLconst [c] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != Op386SHLLconst { - 
continue - } - c := auxIntToInt32(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != Op386SHRLconst { - continue - } - d := auxIntToInt32(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 32-c) { - continue - } - v.reset(Op386ROLLconst) - v.AuxInt = int32ToAuxInt(c) - v.AddArg(x) - return true - } - break - } - // match: ( ORL (SHLLconst x [c]) (SHRWconst x [d])) - // cond: c < 16 && d == int16(16-c) && t.Size() == 2 - // result: (ROLWconst x [int16(c)]) - for { - t := v.Type - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != Op386SHLLconst { - continue - } - c := auxIntToInt32(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != Op386SHRWconst { - continue - } - d := auxIntToInt16(v_1.AuxInt) - if x != v_1.Args[0] || !(c < 16 && d == int16(16-c) && t.Size() == 2) { - continue - } - v.reset(Op386ROLWconst) - v.AuxInt = int16ToAuxInt(int16(c)) - v.AddArg(x) - return true - } - break - } - // match: ( ORL (SHLLconst x [c]) (SHRBconst x [d])) - // cond: c < 8 && d == int8(8-c) && t.Size() == 1 - // result: (ROLBconst x [int8(c)]) - for { - t := v.Type - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != Op386SHLLconst { - continue - } - c := auxIntToInt32(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != Op386SHRBconst { - continue - } - d := auxIntToInt8(v_1.AuxInt) - if x != v_1.Args[0] || !(c < 8 && d == int8(8-c) && t.Size() == 1) { - continue - } - v.reset(Op386ROLBconst) - v.AuxInt = int8ToAuxInt(int8(c)) - v.AddArg(x) - return true - } - break - } // match: (ORL x l:(MOVLload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (ORLload x [off] {sym} ptr mem) @@ -6809,6 +6670,24 @@ func rewriteValue386_Op386ORLmodify(v *Value) bool { } return false } +func rewriteValue386_Op386ROLB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ROLB x (MOVLconst [c])) + // result: (ROLBconst [int8(c&7)] x) + for { + x := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386ROLBconst) + v.AuxInt = int8ToAuxInt(int8(c & 7)) + v.AddArg(x) + return true + } + return false +} func rewriteValue386_Op386ROLBconst(v *Value) bool { v_0 := v.Args[0] // match: (ROLBconst [c] (ROLBconst [d] x)) @@ -6837,6 +6716,24 @@ func rewriteValue386_Op386ROLBconst(v *Value) bool { } return false } +func rewriteValue386_Op386ROLL(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ROLL x (MOVLconst [c])) + // result: (ROLLconst [c&31] x) + for { + x := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386ROLLconst) + v.AuxInt = int32ToAuxInt(c & 31) + v.AddArg(x) + return true + } + return false +} func rewriteValue386_Op386ROLLconst(v *Value) bool { v_0 := v.Args[0] // match: (ROLLconst [c] (ROLLconst [d] x)) @@ -6865,6 +6762,24 @@ func rewriteValue386_Op386ROLLconst(v *Value) bool { } return false } +func rewriteValue386_Op386ROLW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ROLW x (MOVLconst [c])) + // result: (ROLWconst [int16(c&15)] x) + for { + x := v_0 + if v_1.Op != Op386MOVLconst { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(Op386ROLWconst) + v.AuxInt = int16ToAuxInt(int16(c & 15)) + v.AddArg(x) + return true + } + return false +} func rewriteValue386_Op386ROLWconst(v *Value) bool { v_0 := v.Args[0] // match: (ROLWconst [c] (ROLWconst [d] x)) @@ -8346,80 +8261,6 @@ func rewriteValue386_Op386XORL(v *Value) bool { } break } - // match: (XORL (SHLLconst [c] x) (SHRLconst [d] x)) - // cond: d == 32-c - // result: 
(ROLLconst [c] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != Op386SHLLconst { - continue - } - c := auxIntToInt32(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != Op386SHRLconst { - continue - } - d := auxIntToInt32(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 32-c) { - continue - } - v.reset(Op386ROLLconst) - v.AuxInt = int32ToAuxInt(c) - v.AddArg(x) - return true - } - break - } - // match: (XORL (SHLLconst x [c]) (SHRWconst x [d])) - // cond: c < 16 && d == int16(16-c) && t.Size() == 2 - // result: (ROLWconst x [int16(c)]) - for { - t := v.Type - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != Op386SHLLconst { - continue - } - c := auxIntToInt32(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != Op386SHRWconst { - continue - } - d := auxIntToInt16(v_1.AuxInt) - if x != v_1.Args[0] || !(c < 16 && d == int16(16-c) && t.Size() == 2) { - continue - } - v.reset(Op386ROLWconst) - v.AuxInt = int16ToAuxInt(int16(c)) - v.AddArg(x) - return true - } - break - } - // match: (XORL (SHLLconst x [c]) (SHRBconst x [d])) - // cond: c < 8 && d == int8(8-c) && t.Size() == 1 - // result: (ROLBconst x [int8(c)]) - for { - t := v.Type - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != Op386SHLLconst { - continue - } - c := auxIntToInt32(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != Op386SHRBconst { - continue - } - d := auxIntToInt8(v_1.AuxInt) - if x != v_1.Args[0] || !(c < 8 && d == int8(8-c) && t.Size() == 1) { - continue - } - v.reset(Op386ROLBconst) - v.AuxInt = int8ToAuxInt(int8(c)) - v.AddArg(x) - return true - } - break - } // match: (XORL x l:(MOVLload [off] {sym} ptr mem)) // cond: canMergeLoadClobber(v, l, x) && clobber(l) // result: (XORLload x [off] {sym} ptr mem) @@ -10298,60 +10139,6 @@ func rewriteValue386_OpPanicExtend(v *Value) bool { } return false } -func rewriteValue386_OpRotateLeft16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (RotateLeft16 x (MOVLconst [c])) - // result: (ROLWconst [int16(c&15)] x) - for { - x := v_0 - if v_1.Op != Op386MOVLconst { - break - } - c := auxIntToInt32(v_1.AuxInt) - v.reset(Op386ROLWconst) - v.AuxInt = int16ToAuxInt(int16(c & 15)) - v.AddArg(x) - return true - } - return false -} -func rewriteValue386_OpRotateLeft32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (RotateLeft32 x (MOVLconst [c])) - // result: (ROLLconst [c&31] x) - for { - x := v_0 - if v_1.Op != Op386MOVLconst { - break - } - c := auxIntToInt32(v_1.AuxInt) - v.reset(Op386ROLLconst) - v.AuxInt = int32ToAuxInt(c & 31) - v.AddArg(x) - return true - } - return false -} -func rewriteValue386_OpRotateLeft8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (RotateLeft8 x (MOVLconst [c])) - // result: (ROLBconst [int8(c&7)] x) - for { - x := v_0 - if v_1.Op != Op386MOVLconst { - break - } - c := auxIntToInt32(v_1.AuxInt) - v.reset(Op386ROLBconst) - v.AuxInt = int8ToAuxInt(int8(c & 7)) - v.AddArg(x) - return true - } - return false -} func rewriteValue386_OpRsh16Ux16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 9d8ce8708b..d4af5f03e1 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -1276,80 +1276,6 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { } break } - // match: (ADDL (SHLLconst x [c]) (SHRLconst x [d])) - // cond: d==32-c - // result: (ROLLconst x [c]) - for { - for _i0 := 
0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLLconst { - continue - } - c := auxIntToInt8(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpAMD64SHRLconst { - continue - } - d := auxIntToInt8(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 32-c) { - continue - } - v.reset(OpAMD64ROLLconst) - v.AuxInt = int8ToAuxInt(c) - v.AddArg(x) - return true - } - break - } - // match: (ADDL (SHLLconst x [c]) (SHRWconst x [d])) - // cond: d==16-c && c < 16 && t.Size() == 2 - // result: (ROLWconst x [c]) - for { - t := v.Type - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLLconst { - continue - } - c := auxIntToInt8(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpAMD64SHRWconst { - continue - } - d := auxIntToInt8(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) { - continue - } - v.reset(OpAMD64ROLWconst) - v.AuxInt = int8ToAuxInt(c) - v.AddArg(x) - return true - } - break - } - // match: (ADDL (SHLLconst x [c]) (SHRBconst x [d])) - // cond: d==8-c && c < 8 && t.Size() == 1 - // result: (ROLBconst x [c]) - for { - t := v.Type - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLLconst { - continue - } - c := auxIntToInt8(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpAMD64SHRBconst { - continue - } - d := auxIntToInt8(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) { - continue - } - v.reset(OpAMD64ROLBconst) - v.AuxInt = int8ToAuxInt(c) - v.AddArg(x) - return true - } - break - } // match: (ADDL x (SHLLconst [3] y)) // result: (LEAL8 x y) for { @@ -1915,30 +1841,6 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { } break } - // match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d])) - // cond: d==64-c - // result: (ROLQconst x [c]) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLQconst { - continue - } - c := auxIntToInt8(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpAMD64SHRQconst { - continue - } - d := auxIntToInt8(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 64-c) { - continue - } - v.reset(OpAMD64ROLQconst) - v.AuxInt = int8ToAuxInt(c) - v.AddArg(x) - return true - } - break - } // match: (ADDQ x (SHLQconst [3] y)) // result: (LEAQ8 x y) for { @@ -7516,40 +7418,6 @@ func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool { func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool { v_0 := v.Args[0] b := v.Block - // match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) - // result: (FlagLT_ULT) - for { - if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -16 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 15 { - break - } - v.reset(OpAMD64FlagLT_ULT) - return true - } - // match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32]) - // result: (FlagLT_ULT) - for { - if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -8 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 7 { - break - } - v.reset(OpAMD64FlagLT_ULT) - return true - } // match: (CMPQconst (MOVQconst [x]) [y]) // cond: x==int64(y) // result: (FlagEQ) @@ -16107,1484 +15975,178 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { } break } - // match: (ORL (SHLLconst x [c]) (SHRLconst x [d])) 
- // cond: d==32-c - // result: (ROLLconst x [c]) + // match: (ORL x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem))) + // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) + // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLLconst { + x0 := v_0 + if x0.Op != OpAMD64MOVBload { continue } - c := auxIntToInt8(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpAMD64SHRLconst { + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) + mem := x0.Args[1] + p := x0.Args[0] + sh := v_1 + if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 { continue } - d := auxIntToInt8(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 32-c) { + x1 := sh.Args[0] + if x1.Op != OpAMD64MOVBload { continue } - v.reset(OpAMD64ROLLconst) - v.AuxInt = int8ToAuxInt(c) - v.AddArg(x) + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) + v0.AddArg2(p, mem) return true } break } - // match: (ORL (SHLLconst x [c]) (SHRWconst x [d])) - // cond: d==16-c && c < 16 && t.Size() == 2 - // result: (ROLWconst x [c]) + // match: (ORL x0:(MOVBload [i] {s} p0 mem) sh:(SHLLconst [8] x1:(MOVBload [i] {s} p1 mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) + // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem) for { - t := v.Type for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLLconst { + x0 := v_0 + if x0.Op != OpAMD64MOVBload { continue } - c := auxIntToInt8(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpAMD64SHRWconst { + i := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) + mem := x0.Args[1] + p0 := x0.Args[0] + sh := v_1 + if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 { continue } - d := auxIntToInt8(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) { + x1 := sh.Args[0] + if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { continue } - v.reset(OpAMD64ROLWconst) - v.AuxInt = int8ToAuxInt(c) - v.AddArg(x) + _ = x1.Args[1] + p1 := x1.Args[0] + if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(i) + v0.Aux = symToAux(s) + v0.AddArg2(p0, mem) return true } break } - // match: (ORL (SHLLconst x [c]) (SHRBconst x [d])) - // cond: d==8-c && c < 8 && t.Size() == 1 - // result: (ROLBconst x [c]) + // match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem))) + // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) + // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) for { - t := v.Type for _i0 := 0; _i0 
<= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLLconst { + x0 := v_0 + if x0.Op != OpAMD64MOVWload { continue } - c := auxIntToInt8(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpAMD64SHRBconst { + i0 := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) + mem := x0.Args[1] + p := x0.Args[0] + sh := v_1 + if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 { continue } - d := auxIntToInt8(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) { + x1 := sh.Args[0] + if x1.Op != OpAMD64MOVWload { continue } - v.reset(OpAMD64ROLBconst) - v.AuxInt = int8ToAuxInt(c) - v.AddArg(x) + i1 := auxIntToInt32(x1.AuxInt) + if auxToSym(x1.Aux) != s { + continue + } + _ = x1.Args[1] + if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { + continue + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(i0) + v0.Aux = symToAux(s) + v0.AddArg2(p, mem) return true } break } - // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) - // result: (ROLL x y) + // match: (ORL x0:(MOVWload [i] {s} p0 mem) sh:(SHLLconst [16] x1:(MOVWload [i] {s} p1 mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) + // result: @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLL { + x0 := v_0 + if x0.Op != OpAMD64MOVWload { continue } - y := v_0.Args[1] - x := v_0.Args[0] - if v_1.Op != OpAMD64ANDL { + i := auxIntToInt32(x0.AuxInt) + s := auxToSym(x0.Aux) + mem := x0.Args[1] + p0 := x0.Args[0] + sh := v_1 + if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 { continue } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHRL { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGQ { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] { - continue - } - v.reset(OpAMD64ROLL) - v.AddArg2(x, y) - return true + x1 := sh.Args[0] + if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { + continue + } + _ = x1.Args[1] + p1 := x1.Args[0] + if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { + continue } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) + v.copyOf(v0) + v0.AuxInt = int32ToAuxInt(i) + v0.Aux = symToAux(s) + v0.AddArg2(p0, mem) + return true } break } - // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) 
[32])))) - // result: (ROLL x y) + // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y)) + // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) + // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWload [i0] {s} p mem)) y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLL { + s1 := v_0 + if s1.Op != OpAMD64SHLLconst { continue } - y := v_0.Args[1] - x := v_0.Args[0] - if v_1.Op != OpAMD64ANDL { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHRL { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGL { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] { - continue - } - v.reset(OpAMD64ROLL) - v.AddArg2(x, y) - return true - } - } - break - } - // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) - // result: (RORL x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHRL { - continue - } - y := v_0.Args[1] - x := v_0.Args[0] - if v_1.Op != OpAMD64ANDL { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHLL { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGQ { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] { - continue - } - v.reset(OpAMD64RORL) - v.AddArg2(x, y) - return true - } - } - break - } - // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) - // result: (RORL x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHRL { - continue - } - y := v_0.Args[1] - x := v_0.Args[0] - if v_1.Op != OpAMD64ANDL { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHLL { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - 
if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGL { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] { - continue - } - v.reset(OpAMD64RORL) - v.AddArg2(x, y) - return true - } - } - break - } - // match: (ORL (SHLXL x y) (ANDL (SHRXL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) - // result: (ROLL x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLXL { - continue - } - y := v_0.Args[1] - x := v_0.Args[0] - if v_1.Op != OpAMD64ANDL { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHRXL { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGQ { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] { - continue - } - v.reset(OpAMD64ROLL) - v.AddArg2(x, y) - return true - } - } - break - } - // match: (ORL (SHLXL x y) (ANDL (SHRXL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) - // result: (ROLL x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLXL { - continue - } - y := v_0.Args[1] - x := v_0.Args[0] - if v_1.Op != OpAMD64ANDL { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHRXL { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGL { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] { - continue - } - v.reset(OpAMD64ROLL) - v.AddArg2(x, y) - return true - } - } - break - } - // match: (ORL (SHRXL x y) (ANDL (SHLXL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32])))) - // result: (RORL x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != 
OpAMD64SHRXL { - continue - } - y := v_0.Args[1] - x := v_0.Args[0] - if v_1.Op != OpAMD64ANDL { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHLXL { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGQ { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] { - continue - } - v.reset(OpAMD64RORL) - v.AddArg2(x, y) - return true - } - } - break - } - // match: (ORL (SHRXL x y) (ANDL (SHLXL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32])))) - // result: (RORL x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHRXL { - continue - } - y := v_0.Args[1] - x := v_0.Args[0] - if v_1.Op != OpAMD64ANDL { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHLXL { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGL { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] { - continue - } - v.reset(OpAMD64RORL) - v.AddArg2(x, y) - return true - } - } - break - } - // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])))) - // cond: v.Type.Size() == 2 - // result: (ROLW x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLL { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpAMD64ANDL { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHRW { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ { - continue - } - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 { - continue - } - v_1_0_1_0_0 := v_1_0_1_0.Args[0] - if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != 
OpAMD64SBBLcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGQ { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) { - continue - } - v.reset(OpAMD64ROLW) - v.AddArg2(x, y) - return true - } - } - break - } - // match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])))) - // cond: v.Type.Size() == 2 - // result: (ROLW x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLL { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpAMD64ANDL { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHRW { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL { - continue - } - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 { - continue - } - v_1_0_1_0_0 := v_1_0_1_0.Args[0] - if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGL { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) { - continue - } - v.reset(OpAMD64ROLW) - v.AddArg2(x, y) - return true - } - } - break - } - // match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) - // cond: v.Type.Size() == 2 - // result: (RORW x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHRW { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpAMD64SHLL { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64NEGQ { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) { - continue - } - v.reset(OpAMD64RORW) - v.AddArg2(x, y) - return true - } - break - } - // match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) - // cond: 
v.Type.Size() == 2 - // result: (RORW x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHRW { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpAMD64SHLL { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64NEGL { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) { - continue - } - v.reset(OpAMD64RORW) - v.AddArg2(x, y) - return true - } - break - } - // match: (ORL (SHLXL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])))) - // cond: v.Type.Size() == 2 - // result: (ROLW x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLXL { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpAMD64ANDL { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHRW { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ { - continue - } - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 { - continue - } - v_1_0_1_0_0 := v_1_0_1_0.Args[0] - if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGQ { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) { - continue - } - v.reset(OpAMD64ROLW) - v.AddArg2(x, y) - return true - } - } - break - } - // match: (ORL (SHLXL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])))) - // cond: v.Type.Size() == 2 - // result: (ROLW x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLXL { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpAMD64ANDL { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHRW { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL { - continue - } 
- v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 { - continue - } - v_1_0_1_0_0 := v_1_0_1_0.Args[0] - if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGL { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) { - continue - } - v.reset(OpAMD64ROLW) - v.AddArg2(x, y) - return true - } - } - break - } - // match: (ORL (SHRW x (ANDQconst y [15])) (SHLXL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) - // cond: v.Type.Size() == 2 - // result: (RORW x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHRW { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpAMD64SHLXL { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64NEGQ { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) { - continue - } - v.reset(OpAMD64RORW) - v.AddArg2(x, y) - return true - } - break - } - // match: (ORL (SHRW x (ANDLconst y [15])) (SHLXL x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) - // cond: v.Type.Size() == 2 - // result: (RORW x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHRW { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpAMD64SHLXL { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64NEGL { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) { - continue - } - v.reset(OpAMD64RORW) - v.AddArg2(x, y) - return true - } - break - } - // match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])))) - // cond: v.Type.Size() == 1 - // result: (ROLB x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLL { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpAMD64ANDL { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 
= _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHRB { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ { - continue - } - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 { - continue - } - v_1_0_1_0_0 := v_1_0_1_0.Args[0] - if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGQ { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) { - continue - } - v.reset(OpAMD64ROLB) - v.AddArg2(x, y) - return true - } - } - break - } - // match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])))) - // cond: v.Type.Size() == 1 - // result: (ROLB x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLL { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpAMD64ANDL { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHRB { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL { - continue - } - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 { - continue - } - v_1_0_1_0_0 := v_1_0_1_0.Args[0] - if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGL { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) { - continue - } - v.reset(OpAMD64ROLB) - v.AddArg2(x, y) - return true - } - } - break - } - // match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) - // cond: v.Type.Size() == 1 - // result: (RORB x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHRB { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpAMD64SHLL { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64NEGQ { - continue - } - 
v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) { - continue - } - v.reset(OpAMD64RORB) - v.AddArg2(x, y) - return true - } - break - } - // match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) - // cond: v.Type.Size() == 1 - // result: (RORB x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHRB { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpAMD64SHLL { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64NEGL { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) { - continue - } - v.reset(OpAMD64RORB) - v.AddArg2(x, y) - return true - } - break - } - // match: (ORL (SHLXL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])))) - // cond: v.Type.Size() == 1 - // result: (ROLB x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLXL { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpAMD64ANDL { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHRB { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ { - continue - } - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 { - continue - } - v_1_0_1_0_0 := v_1_0_1_0.Args[0] - if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGQ { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) { - continue - } - v.reset(OpAMD64ROLB) - v.AddArg2(x, y) - return true - } - } - break - } - // match: (ORL (SHLXL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])))) - // cond: v.Type.Size() == 1 - // result: (ROLB x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLXL { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] 
- if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpAMD64ANDL { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHRB { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL { - continue - } - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 { - continue - } - v_1_0_1_0_0 := v_1_0_1_0.Args[0] - if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGL { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) { - continue - } - v.reset(OpAMD64ROLB) - v.AddArg2(x, y) - return true - } - } - break - } - // match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLXL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) - // cond: v.Type.Size() == 1 - // result: (RORB x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHRB { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpAMD64SHLXL { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64NEGQ { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) { - continue - } - v.reset(OpAMD64RORB) - v.AddArg2(x, y) - return true - } - break - } - // match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLXL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) - // cond: v.Type.Size() == 1 - // result: (RORB x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHRB { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpAMD64SHLXL { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpAMD64NEGL { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) { - continue - } - v.reset(OpAMD64RORB) - v.AddArg2(x, y) - return true - } - break - } - // match: (ORL x x) - // result: x - for { - x := v_0 - if x != v_1 { - break - } - v.copyOf(x) - return true - } - // match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem))) - 
// cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x0 := v_0 - if x0.Op != OpAMD64MOVBload { - continue - } - i0 := auxIntToInt32(x0.AuxInt) - s := auxToSym(x0.Aux) - mem := x0.Args[1] - p := x0.Args[0] - sh := v_1 - if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 { - continue - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBload { - continue - } - i1 := auxIntToInt32(x1.AuxInt) - if auxToSym(x1.Aux) != s { - continue - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { - continue - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(i0) - v0.Aux = symToAux(s) - v0.AddArg2(p, mem) - return true - } - break - } - // match: (ORL x0:(MOVBload [i] {s} p0 mem) sh:(SHLLconst [8] x1:(MOVBload [i] {s} p1 mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x0 := v_0 - if x0.Op != OpAMD64MOVBload { - continue - } - i := auxIntToInt32(x0.AuxInt) - s := auxToSym(x0.Aux) - mem := x0.Args[1] - p0 := x0.Args[0] - sh := v_1 - if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 { - continue - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { - continue - } - _ = x1.Args[1] - p1 := x1.Args[0] - if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { - continue - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(i) - v0.Aux = symToAux(s) - v0.AddArg2(p0, mem) - return true - } - break - } - // match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh) - // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x0 := v_0 - if x0.Op != OpAMD64MOVWload { - continue - } - i0 := auxIntToInt32(x0.AuxInt) - s := auxToSym(x0.Aux) - mem := x0.Args[1] - p := x0.Args[0] - sh := v_1 - if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 { - continue - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWload { - continue - } - i1 := auxIntToInt32(x1.AuxInt) - if auxToSym(x1.Aux) != s { - continue - } - _ = x1.Args[1] - if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { - continue - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(i0) - v0.Aux = symToAux(s) - v0.AddArg2(p, mem) - return true - } - break - } - // match: (ORL x0:(MOVWload [i] {s} p0 mem) sh:(SHLLconst [16] x1:(MOVWload [i] {s} p1 mem))) - // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil 
&& clobber(x0, x1, sh) - // result: @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x0 := v_0 - if x0.Op != OpAMD64MOVWload { - continue - } - i := auxIntToInt32(x0.AuxInt) - s := auxToSym(x0.Aux) - mem := x0.Args[1] - p0 := x0.Args[0] - sh := v_1 - if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 { - continue - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s { - continue - } - _ = x1.Args[1] - p1 := x1.Args[0] - if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) { - continue - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32) - v.copyOf(v0) - v0.AuxInt = int32ToAuxInt(i) - v0.Aux = symToAux(s) - v0.AddArg2(p0, mem) - return true - } - break - } - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or) - // result: @mergePoint(b,x0,x1,y) (ORL (SHLLconst [j0] (MOVWload [i0] {s} p mem)) y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - s1 := v_0 - if s1.Op != OpAMD64SHLLconst { - continue - } - j1 := auxIntToInt8(s1.AuxInt) - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { + j1 := auxIntToInt8(s1.AuxInt) + x1 := s1.Args[0] + if x1.Op != OpAMD64MOVBload { continue } i1 := auxIntToInt32(x1.AuxInt) @@ -18274,534 +16836,110 @@ func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool { if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) { break } - v.reset(OpAMD64ORLmodify) - v.AuxInt = int32ToAuxInt(off1 + off2) - v.Aux = symToAux(mergeSym(sym1, sym2)) - v.AddArg3(base, val, mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (ORQ (SHLQ (MOVQconst [1]) y) x) - // result: (BTSQ x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLQ { - continue - } - y := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 { - continue - } - x := v_1 - v.reset(OpAMD64BTSQ) - v.AddArg2(x, y) - return true - } - break - } - // match: (ORQ (SHLXQ (MOVQconst [1]) y) x) - // result: (BTSQ x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLXQ { - continue - } - y := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 { - continue - } - x := v_1 - v.reset(OpAMD64BTSQ) - v.AddArg2(x, y) - return true - } - break - } - // match: (ORQ (MOVQconst [c]) x) - // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 - // result: (BTSQconst [int8(log64(c))] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64MOVQconst { - continue - } - c := auxIntToInt64(v_0.AuxInt) - x := v_1 - if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) { - continue - } - v.reset(OpAMD64BTSQconst) - v.AuxInt = int8ToAuxInt(int8(log64(c))) - v.AddArg(x) - return true - } - break - } - // match: (ORQ x (MOVQconst [c])) - // cond: is32Bit(c) - // result: (ORQconst [int32(c)] x) - for { - for _i0 := 0; _i0 <= 1; _i0, 
v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpAMD64MOVQconst { - continue - } - c := auxIntToInt64(v_1.AuxInt) - if !(is32Bit(c)) { - continue - } - v.reset(OpAMD64ORQconst) - v.AuxInt = int32ToAuxInt(int32(c)) - v.AddArg(x) - return true - } - break - } - // match: (ORQ x (MOVLconst [c])) - // result: (ORQconst [c] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpAMD64MOVLconst { - continue - } - c := auxIntToInt32(v_1.AuxInt) - v.reset(OpAMD64ORQconst) - v.AuxInt = int32ToAuxInt(c) - v.AddArg(x) - return true - } - break - } - // match: (ORQ (SHLQconst x [c]) (SHRQconst x [d])) - // cond: d==64-c - // result: (ROLQconst x [c]) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLQconst { - continue - } - c := auxIntToInt8(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpAMD64SHRQconst { - continue - } - d := auxIntToInt8(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 64-c) { - continue - } - v.reset(OpAMD64ROLQconst) - v.AuxInt = int8ToAuxInt(c) - v.AddArg(x) - return true - } - break - } - // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) - // result: (ROLQ x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLQ { - continue - } - y := v_0.Args[1] - x := v_0.Args[0] - if v_1.Op != OpAMD64ANDQ { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHRQ { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGQ { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] { - continue - } - v.reset(OpAMD64ROLQ) - v.AddArg2(x, y) - return true - } - } - break - } - // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) - // result: (ROLQ x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLQ { - continue - } - y := v_0.Args[1] - x := v_0.Args[0] - if v_1.Op != OpAMD64ANDQ { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHRQ { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGL { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || 
auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] { - continue - } - v.reset(OpAMD64ROLQ) - v.AddArg2(x, y) - return true - } - } - break - } - // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) - // result: (RORQ x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHRQ { - continue - } - y := v_0.Args[1] - x := v_0.Args[0] - if v_1.Op != OpAMD64ANDQ { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHLQ { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGQ { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] { - continue - } - v.reset(OpAMD64RORQ) - v.AddArg2(x, y) - return true - } - } - break - } - // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) - // result: (RORQ x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHRQ { - continue - } - y := v_0.Args[1] - x := v_0.Args[0] - if v_1.Op != OpAMD64ANDQ { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHLQ { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGL { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] { - continue - } - v.reset(OpAMD64RORQ) - v.AddArg2(x, y) - return true - } - } - break + v.reset(OpAMD64ORLmodify) + v.AuxInt = int32ToAuxInt(off1 + off2) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(base, val, mem) + return true } - // match: (ORQ (SHLXQ x y) (ANDQ (SHRXQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) - // result: (ROLQ x y) + return false +} +func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (ORQ (SHLQ (MOVQconst [1]) y) x) + // result: (BTSQ x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLXQ { + if v_0.Op != OpAMD64SHLQ { continue } y := v_0.Args[1] - x := v_0.Args[0] - if v_1.Op != OpAMD64ANDQ { + v_0_0 := v_0.Args[0] + if v_0_0.Op != 
OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 { continue } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHRXQ { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGQ { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] { - continue - } - v.reset(OpAMD64ROLQ) - v.AddArg2(x, y) - return true - } + x := v_1 + v.reset(OpAMD64BTSQ) + v.AddArg2(x, y) + return true } break } - // match: (ORQ (SHLXQ x y) (ANDQ (SHRXQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) - // result: (ROLQ x y) + // match: (ORQ (SHLXQ (MOVQconst [1]) y) x) + // result: (BTSQ x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpAMD64SHLXQ { continue } y := v_0.Args[1] - x := v_0.Args[0] - if v_1.Op != OpAMD64ANDQ { + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 { continue } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHRXQ { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGL { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] { - continue - } - v.reset(OpAMD64ROLQ) - v.AddArg2(x, y) - return true - } + x := v_1 + v.reset(OpAMD64BTSQ) + v.AddArg2(x, y) + return true } break } - // match: (ORQ (SHRXQ x y) (ANDQ (SHLXQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64])))) - // result: (RORQ x y) + // match: (ORQ (MOVQconst [c]) x) + // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128 + // result: (BTSQconst [int8(log64(c))] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHRXQ { + if v_0.Op != OpAMD64MOVQconst { continue } - y := v_0.Args[1] - x := v_0.Args[0] - if v_1.Op != OpAMD64ANDQ { + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) { continue } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHLXQ { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != 
OpAMD64SBBQcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGQ { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] { - continue - } - v.reset(OpAMD64RORQ) - v.AddArg2(x, y) - return true - } + v.reset(OpAMD64BTSQconst) + v.AuxInt = int8ToAuxInt(int8(log64(c))) + v.AddArg(x) + return true } break } - // match: (ORQ (SHRXQ x y) (ANDQ (SHLXQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64])))) - // result: (RORQ x y) + // match: (ORQ x (MOVQconst [c])) + // cond: is32Bit(c) + // result: (ORQconst [int32(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHRXQ { + x := v_0 + if v_1.Op != OpAMD64MOVQconst { continue } - y := v_0.Args[1] - x := v_0.Args[0] - if v_1.Op != OpAMD64ANDQ { + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { continue } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpAMD64SHLXQ { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpAMD64NEGL { - continue - } - v_1_1_0_0_0 := v_1_1_0_0.Args[0] - if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 { - continue - } - v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0] - if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] { - continue - } - v.reset(OpAMD64RORQ) - v.AddArg2(x, y) - return true + v.reset(OpAMD64ORQconst) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg(x) + return true + } + break + } + // match: (ORQ x (MOVLconst [c])) + // result: (ORQconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpAMD64MOVLconst { + continue } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpAMD64ORQconst) + v.AuxInt = int32ToAuxInt(c) + v.AddArg(x) + return true } break } @@ -20356,20 +18494,6 @@ func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool { } func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool { v_0 := v.Args[0] - // match: (ROLBconst [c] (ROLBconst [d] x)) - // result: (ROLBconst [(c+d)& 7] x) - for { - c := auxIntToInt8(v.AuxInt) - if v_0.Op != OpAMD64ROLBconst { - break - } - d := auxIntToInt8(v_0.AuxInt) - x := v_0.Args[0] - v.reset(OpAMD64ROLBconst) - v.AuxInt = int8ToAuxInt((c + d) & 7) - v.AddArg(x) - return true - } // match: (ROLBconst x [0]) // result: x for { @@ -20439,20 +18563,6 @@ func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool { } func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool { v_0 := v.Args[0] - // match: (ROLLconst [c] (ROLLconst [d] x)) - // result: (ROLLconst [(c+d)&31] x) - for { - c := auxIntToInt8(v.AuxInt) - if v_0.Op != OpAMD64ROLLconst { - break - } - d := auxIntToInt8(v_0.AuxInt) - x := v_0.Args[0] - v.reset(OpAMD64ROLLconst) - v.AuxInt = 
int8ToAuxInt((c + d) & 31) - v.AddArg(x) - return true - } // match: (ROLLconst x [0]) // result: x for { @@ -20522,20 +18632,6 @@ func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool { } func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool { v_0 := v.Args[0] - // match: (ROLQconst [c] (ROLQconst [d] x)) - // result: (ROLQconst [(c+d)&63] x) - for { - c := auxIntToInt8(v.AuxInt) - if v_0.Op != OpAMD64ROLQconst { - break - } - d := auxIntToInt8(v_0.AuxInt) - x := v_0.Args[0] - v.reset(OpAMD64ROLQconst) - v.AuxInt = int8ToAuxInt((c + d) & 63) - v.AddArg(x) - return true - } // match: (ROLQconst x [0]) // result: x for { @@ -20605,20 +18701,6 @@ func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool { } func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool { v_0 := v.Args[0] - // match: (ROLWconst [c] (ROLWconst [d] x)) - // result: (ROLWconst [(c+d)&15] x) - for { - c := auxIntToInt8(v.AuxInt) - if v_0.Op != OpAMD64ROLWconst { - break - } - d := auxIntToInt8(v_0.AuxInt) - x := v_0.Args[0] - v.reset(OpAMD64ROLWconst) - v.AuxInt = int8ToAuxInt((c + d) & 15) - v.AddArg(x) - return true - } // match: (ROLWconst x [0]) // result: x for { @@ -29797,80 +27879,6 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { } break } - // match: (XORL (SHLLconst x [c]) (SHRLconst x [d])) - // cond: d==32-c - // result: (ROLLconst x [c]) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLLconst { - continue - } - c := auxIntToInt8(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpAMD64SHRLconst { - continue - } - d := auxIntToInt8(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 32-c) { - continue - } - v.reset(OpAMD64ROLLconst) - v.AuxInt = int8ToAuxInt(c) - v.AddArg(x) - return true - } - break - } - // match: (XORL (SHLLconst x [c]) (SHRWconst x [d])) - // cond: d==16-c && c < 16 && t.Size() == 2 - // result: (ROLWconst x [c]) - for { - t := v.Type - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLLconst { - continue - } - c := auxIntToInt8(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpAMD64SHRWconst { - continue - } - d := auxIntToInt8(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) { - continue - } - v.reset(OpAMD64ROLWconst) - v.AuxInt = int8ToAuxInt(c) - v.AddArg(x) - return true - } - break - } - // match: (XORL (SHLLconst x [c]) (SHRBconst x [d])) - // cond: d==8-c && c < 8 && t.Size() == 1 - // result: (ROLBconst x [c]) - for { - t := v.Type - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLLconst { - continue - } - c := auxIntToInt8(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpAMD64SHRBconst { - continue - } - d := auxIntToInt8(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) { - continue - } - v.reset(OpAMD64ROLBconst) - v.AuxInt = int8ToAuxInt(c) - v.AddArg(x) - return true - } - break - } // match: (XORL x x) // result: (MOVLconst [0]) for { @@ -30357,30 +28365,6 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool { } break } - // match: (XORQ (SHLQconst x [c]) (SHRQconst x [d])) - // cond: d==64-c - // result: (ROLQconst x [c]) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAMD64SHLQconst { - continue - } - c := auxIntToInt8(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpAMD64SHRQconst { - continue - } - d := auxIntToInt8(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 64-c) { - continue - } - v.reset(OpAMD64ROLQconst) - v.AuxInt = int8ToAuxInt(c) - v.AddArg(x) - return true - } - break - } // match: 
(XORQ x x) // result: (MOVQconst [0]) for { diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index 1b50bf9aa6..0aebdced40 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -2080,22 +2080,6 @@ func rewriteValueARM_OpARMADDshiftLL(v *Value) bool { v.AddArg(x) return true } - // match: (ADDshiftLL [c] (SRLconst x [32-c]) x) - // result: (SRRconst [32-c] x) - for { - c := auxIntToInt32(v.AuxInt) - if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != 32-c { - break - } - x := v_0.Args[0] - if x != v_1 { - break - } - v.reset(OpARMSRRconst) - v.AuxInt = int32ToAuxInt(32 - c) - v.AddArg(x) - return true - } // match: (ADDshiftLL [8] (BFXU [int32(armBFAuxInt(8, 8))] x) x) // result: (REV16 x) for { @@ -2285,22 +2269,6 @@ func rewriteValueARM_OpARMADDshiftRL(v *Value) bool { v.AddArg(x) return true } - // match: (ADDshiftRL [c] (SLLconst x [32-c]) x) - // result: (SRRconst [ c] x) - for { - c := auxIntToInt32(v.AuxInt) - if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != 32-c { - break - } - x := v_0.Args[0] - if x != v_1 { - break - } - v.reset(OpARMSRRconst) - v.AuxInt = int32ToAuxInt(c) - v.AddArg(x) - return true - } return false } func rewriteValueARM_OpARMADDshiftRLreg(v *Value) bool { @@ -8596,22 +8564,6 @@ func rewriteValueARM_OpARMORshiftLL(v *Value) bool { v.AddArg(x) return true } - // match: ( ORshiftLL [c] (SRLconst x [32-c]) x) - // result: (SRRconst [32-c] x) - for { - c := auxIntToInt32(v.AuxInt) - if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != 32-c { - break - } - x := v_0.Args[0] - if x != v_1 { - break - } - v.reset(OpARMSRRconst) - v.AuxInt = int32ToAuxInt(32 - c) - v.AddArg(x) - return true - } // match: (ORshiftLL [8] (BFXU [int32(armBFAuxInt(8, 8))] x) x) // result: (REV16 x) for { @@ -8831,22 +8783,6 @@ func rewriteValueARM_OpARMORshiftRL(v *Value) bool { v.AddArg(x) return true } - // match: ( ORshiftRL [c] (SLLconst x [32-c]) x) - // result: (SRRconst [ c] x) - for { - c := auxIntToInt32(v.AuxInt) - if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != 32-c { - break - } - x := v_0.Args[0] - if x != v_1 { - break - } - v.reset(OpARMSRRconst) - v.AuxInt = int32ToAuxInt(c) - v.AddArg(x) - return true - } // match: (ORshiftRL y:(SRLconst x [c]) x [c]) // result: y for { @@ -12755,22 +12691,6 @@ func rewriteValueARM_OpARMXORshiftLL(v *Value) bool { v.AddArg(x) return true } - // match: (XORshiftLL [c] (SRLconst x [32-c]) x) - // result: (SRRconst [32-c] x) - for { - c := auxIntToInt32(v.AuxInt) - if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != 32-c { - break - } - x := v_0.Args[0] - if x != v_1 { - break - } - v.reset(OpARMSRRconst) - v.AuxInt = int32ToAuxInt(32 - c) - v.AddArg(x) - return true - } // match: (XORshiftLL [8] (BFXU [int32(armBFAuxInt(8, 8))] x) x) // result: (REV16 x) for { @@ -12990,22 +12910,6 @@ func rewriteValueARM_OpARMXORshiftRL(v *Value) bool { v.AddArg(x) return true } - // match: (XORshiftRL [c] (SLLconst x [32-c]) x) - // result: (SRRconst [ c] x) - for { - c := auxIntToInt32(v.AuxInt) - if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != 32-c { - break - } - x := v_0.Args[0] - if x != v_1 { - break - } - v.reset(OpARMSRRconst) - v.AuxInt = int32ToAuxInt(c) - v.AddArg(x) - return true - } // match: (XORshiftRL (SRLconst x [c]) x [c]) // result: (MOVWconst [0]) for { diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index 
65d5e5f339..fc1fa5337e 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -1181,8 +1181,6 @@ func rewriteValueARM64_OpARM64ADCSflags(v *Value) bool { func rewriteValueARM64_OpARM64ADD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types // match: (ADD x (MOVDconst [c])) // result: (ADDconst [c] x) for { @@ -1364,287 +1362,6 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { } break } - // match: (ADD (SLL x (ANDconst [63] y)) (CSEL0 [cc] (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) - // cond: cc == OpARM64LessThanU - // result: (ROR x (NEG y)) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpARM64SLL { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - continue - } - t := v_0_1.Type - if auxIntToInt64(v_0_1.AuxInt) != 63 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { - continue - } - cc := auxIntToOp(v_1.AuxInt) - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - continue - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 { - continue - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - continue - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) { - continue - } - v.reset(OpARM64ROR) - v0 := b.NewValue0(v.Pos, OpARM64NEG, t) - v0.AddArg(y) - v.AddArg2(x, v0) - return true - } - break - } - // match: (ADD (SRL x (ANDconst [63] y)) (CSEL0 [cc] (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) - // cond: cc == OpARM64LessThanU - // result: (ROR x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt64 { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - continue - } - t := v_0_1.Type - if auxIntToInt64(v_0_1.AuxInt) != 63 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { - continue - } - cc := auxIntToOp(v_1.AuxInt) - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SLL { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - continue - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 { - continue - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || 
v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - continue - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) { - continue - } - v.reset(OpARM64ROR) - v.AddArg2(x, y) - return true - } - break - } - // match: (ADD (SLL x (ANDconst [31] y)) (CSEL0 [cc] (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) - // cond: cc == OpARM64LessThanU - // result: (RORW x (NEG y)) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpARM64SLL { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - continue - } - t := v_0_1.Type - if auxIntToInt64(v_0_1.AuxInt) != 31 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { - continue - } - cc := auxIntToOp(v_1.AuxInt) - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 { - continue - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpARM64MOVWUreg || x != v_1_0_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - continue - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 { - continue - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - continue - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) { - continue - } - v.reset(OpARM64RORW) - v0 := b.NewValue0(v.Pos, OpARM64NEG, t) - v0.AddArg(y) - v.AddArg2(x, v0) - return true - } - break - } - // match: (ADD (SRL (MOVWUreg x) (ANDconst [31] y)) (CSEL0 [cc] (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) - // cond: cc == OpARM64LessThanU - // result: (RORW x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt32 { - continue - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARM64MOVWUreg { - continue - } - x := v_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - continue - } - t := v_0_1.Type - if auxIntToInt64(v_0_1.AuxInt) != 31 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { - continue - } - cc := auxIntToOp(v_1.AuxInt) - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SLL { - 
continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - continue - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 { - continue - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - continue - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) { - continue - } - v.reset(OpARM64RORW) - v.AddArg2(x, y) - return true - } - break - } return false } func rewriteValueARM64_OpARM64ADDconst(v *Value) bool { @@ -1757,41 +1474,6 @@ func rewriteValueARM64_OpARM64ADDshiftLL(v *Value) bool { v.AddArg(x) return true } - // match: (ADDshiftLL [c] (SRLconst x [64-c]) x) - // result: (RORconst [64-c] x) - for { - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c { - break - } - x := v_0.Args[0] - if x != v_1 { - break - } - v.reset(OpARM64RORconst) - v.AuxInt = int64ToAuxInt(64 - c) - v.AddArg(x) - return true - } - // match: (ADDshiftLL [c] (UBFX [bfc] x) x) - // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) - // result: (RORWconst [32-c] x) - for { - t := v.Type - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpARM64UBFX { - break - } - bfc := auxIntToArm64BitField(v_0.AuxInt) - x := v_0.Args[0] - if x != v_1 || !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) { - break - } - v.reset(OpARM64RORWconst) - v.AuxInt = int64ToAuxInt(32 - c) - v.AddArg(x) - return true - } // match: (ADDshiftLL [8] (UBFX [armBFAuxInt(8, 8)] x) x) // result: (REV16W x) for { @@ -1989,40 +1671,6 @@ func rewriteValueARM64_OpARM64ADDshiftRL(v *Value) bool { v.AddArg(x) return true } - // match: (ADDshiftRL [c] (SLLconst x [64-c]) x) - // result: (RORconst [ c] x) - for { - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 64-c { - break - } - x := v_0.Args[0] - if x != v_1 { - break - } - v.reset(OpARM64RORconst) - v.AuxInt = int64ToAuxInt(c) - v.AddArg(x) - return true - } - // match: (ADDshiftRL [c] (SLLconst x [32-c]) (MOVWUreg x)) - // cond: c < 32 && t.Size() == 4 - // result: (RORWconst [c] x) - for { - t := v.Type - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 32-c { - break - } - x := v_0.Args[0] - if v_1.Op != OpARM64MOVWUreg || x != v_1.Args[0] || !(c < 32 && t.Size() == 4) { - break - } - v.reset(OpARM64RORWconst) - v.AuxInt = int64ToAuxInt(c) - v.AddArg(x) - return true - } return false } func rewriteValueARM64_OpARM64AND(v *Value) bool { @@ -16819,7 +16467,6 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types // match: (OR x (MOVDconst [c])) // result: (ORconst [c] x) for { @@ -16949,328 +16596,47 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { } break } - // match: (OR (SLL x (ANDconst [63] y)) (CSEL0 [cc] 
(SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) - // cond: cc == OpARM64LessThanU - // result: (ROR x (NEG y)) + // match: (OR (UBFIZ [bfc] x) (ANDconst [ac] y)) + // cond: ac == ^((1< x (ANDconst [63] y)) (CSEL0 [cc] (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) - // cond: cc == OpARM64LessThanU - // result: (ROR x y) + // match: (OR (UBFX [bfc] x) (ANDconst [ac] y)) + // cond: ac == ^(1< [31] y)) (CSEL0 [cc] (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) - // cond: cc == OpARM64LessThanU - // result: (RORW x (NEG y)) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpARM64SLL { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - continue - } - t := v_0_1.Type - if auxIntToInt64(v_0_1.AuxInt) != 31 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { - continue - } - cc := auxIntToOp(v_1.AuxInt) - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 { - continue - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != OpARM64MOVWUreg || x != v_1_0_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - continue - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 { - continue - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - continue - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) { - continue - } - v.reset(OpARM64RORW) - v0 := b.NewValue0(v.Pos, OpARM64NEG, t) - v0.AddArg(y) - v.AddArg2(x, v0) - return true - } - break - } - // match: (OR (SRL (MOVWUreg x) (ANDconst [31] y)) (CSEL0 [cc] (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) - // cond: cc == OpARM64LessThanU - // result: (RORW x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt32 { - continue - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARM64MOVWUreg { - continue - } - x := v_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - continue - } - t := v_0_1.Type - if auxIntToInt64(v_0_1.AuxInt) != 31 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { - continue - } - cc := auxIntToOp(v_1.AuxInt) - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SLL { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - continue - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != 
OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 { - continue - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - continue - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) { - continue - } - v.reset(OpARM64RORW) - v.AddArg2(x, y) - return true - } - break - } - // match: (OR (UBFIZ [bfc] x) (ANDconst [ac] y)) - // cond: ac == ^((1< [c] (UBFX [bfc] x) x) - // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) - // result: (RORWconst [32-c] x) - for { - t := v.Type - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpARM64UBFX { - break - } - bfc := auxIntToArm64BitField(v_0.AuxInt) - x := v_0.Args[0] - if x != v_1 || !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) { - break - } - v.reset(OpARM64RORWconst) - v.AuxInt = int64ToAuxInt(32 - c) - v.AddArg(x) - return true - } // match: (ORshiftLL [8] (UBFX [armBFAuxInt(8, 8)] x) x) // result: (REV16W x) for { @@ -20924,40 +20255,6 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool { v.copyOf(y) return true } - // match: ( ORshiftRL [c] (SLLconst x [64-c]) x) - // result: (RORconst [ c] x) - for { - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 64-c { - break - } - x := v_0.Args[0] - if x != v_1 { - break - } - v.reset(OpARM64RORconst) - v.AuxInt = int64ToAuxInt(c) - v.AddArg(x) - return true - } - // match: ( ORshiftRL [c] (SLLconst x [32-c]) (MOVWUreg x)) - // cond: c < 32 && t.Size() == 4 - // result: (RORWconst [c] x) - for { - t := v.Type - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 32-c { - break - } - x := v_0.Args[0] - if v_1.Op != OpARM64MOVWUreg || x != v_1.Args[0] || !(c < 32 && t.Size() == 4) { - break - } - v.reset(OpARM64RORWconst) - v.AuxInt = int64ToAuxInt(c) - v.AddArg(x) - return true - } // match: (ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y)) // cond: lc > rc && ac == ^((1< [63] y)) (CSEL0 [cc] (SRL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) - // cond: cc == OpARM64LessThanU - // result: (ROR x (NEG y)) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpARM64SLL { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - continue - } - t := v_0_1.Type - if auxIntToInt64(v_0_1.AuxInt) != 63 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { - continue - } - cc := auxIntToOp(v_1.AuxInt) - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - continue - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 { - continue - } - v_1_0_1_1 := 
v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - continue - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) { - continue - } - v.reset(OpARM64ROR) - v0 := b.NewValue0(v.Pos, OpARM64NEG, t) - v0.AddArg(y) - v.AddArg2(x, v0) - return true - } - break - } - // match: (XOR (SRL x (ANDconst [63] y)) (CSEL0 [cc] (SLL x (SUB (MOVDconst [64]) (ANDconst [63] y))) (CMPconst [64] (SUB (MOVDconst [64]) (ANDconst [63] y))))) - // cond: cc == OpARM64LessThanU - // result: (ROR x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt64 { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - continue - } - t := v_0_1.Type - if auxIntToInt64(v_0_1.AuxInt) != 63 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 { - continue - } - cc := auxIntToOp(v_1.AuxInt) - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SLL { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - continue - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 { - continue - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - continue - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) { - continue - } - v.reset(OpARM64ROR) - v.AddArg2(x, y) - return true - } - break - } - // match: (XOR (SLL x (ANDconst [31] y)) (CSEL0 [cc] (SRL (MOVWUreg x) (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) - // cond: cc == OpARM64LessThanU - // result: (RORW x (NEG y)) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpARM64SLL { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - continue - } - t := v_0_1.Type - if auxIntToInt64(v_0_1.AuxInt) != 31 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { - continue - } - cc := auxIntToOp(v_1.AuxInt) - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 { - continue - } - _ = v_1_0.Args[1] - v_1_0_0 := v_1_0.Args[0] - if v_1_0_0.Op != 
OpARM64MOVWUreg || x != v_1_0_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - continue - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 { - continue - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - continue - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) { - continue - } - v.reset(OpARM64RORW) - v0 := b.NewValue0(v.Pos, OpARM64NEG, t) - v0.AddArg(y) - v.AddArg2(x, v0) - return true - } - break - } - // match: (XOR (SRL (MOVWUreg x) (ANDconst [31] y)) (CSEL0 [cc] (SLL x (SUB (MOVDconst [32]) (ANDconst [31] y))) (CMPconst [64] (SUB (MOVDconst [32]) (ANDconst [31] y))))) - // cond: cc == OpARM64LessThanU - // result: (RORW x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt32 { - continue - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpARM64MOVWUreg { - continue - } - x := v_0_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpARM64ANDconst { - continue - } - t := v_0_1.Type - if auxIntToInt64(v_0_1.AuxInt) != 31 { - continue - } - y := v_0_1.Args[0] - if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 { - continue - } - cc := auxIntToOp(v_1.AuxInt) - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpARM64SLL { - continue - } - _ = v_1_0.Args[1] - if x != v_1_0.Args[0] { - continue - } - v_1_0_1 := v_1_0.Args[1] - if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t { - continue - } - _ = v_1_0_1.Args[1] - v_1_0_1_0 := v_1_0_1.Args[0] - if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 { - continue - } - v_1_0_1_1 := v_1_0_1.Args[1] - if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t { - continue - } - _ = v_1_1_0.Args[1] - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 { - continue - } - v_1_1_0_1 := v_1_1_0.Args[1] - if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) { - continue - } - v.reset(OpARM64RORW) - v.AddArg2(x, y) - return true - } - break - } return false } func rewriteValueARM64_OpARM64XORconst(v *Value) bool { @@ -23417,41 +22431,6 @@ func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool { v.AuxInt = int64ToAuxInt(0) return true } - // match: (XORshiftLL [c] (SRLconst x [64-c]) x) - // result: (RORconst [64-c] x) - for { - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c { - break - } - x := v_0.Args[0] - if x != v_1 { - break - 
} - v.reset(OpARM64RORconst) - v.AuxInt = int64ToAuxInt(64 - c) - v.AddArg(x) - return true - } - // match: (XORshiftLL [c] (UBFX [bfc] x) x) - // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) - // result: (RORWconst [32-c] x) - for { - t := v.Type - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpARM64UBFX { - break - } - bfc := auxIntToArm64BitField(v_0.AuxInt) - x := v_0.Args[0] - if x != v_1 || !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) { - break - } - v.reset(OpARM64RORWconst) - v.AuxInt = int64ToAuxInt(32 - c) - v.AddArg(x) - return true - } // match: (XORshiftLL [8] (UBFX [armBFAuxInt(8, 8)] x) x) // result: (REV16W x) for { @@ -23679,40 +22658,6 @@ func rewriteValueARM64_OpARM64XORshiftRL(v *Value) bool { v.AuxInt = int64ToAuxInt(0) return true } - // match: (XORshiftRL [c] (SLLconst x [64-c]) x) - // result: (RORconst [ c] x) - for { - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 64-c { - break - } - x := v_0.Args[0] - if x != v_1 { - break - } - v.reset(OpARM64RORconst) - v.AuxInt = int64ToAuxInt(c) - v.AddArg(x) - return true - } - // match: (XORshiftRL [c] (SLLconst x [32-c]) (MOVWUreg x)) - // cond: c < 32 && t.Size() == 4 - // result: (RORWconst [c] x) - for { - t := v.Type - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 32-c { - break - } - x := v_0.Args[0] - if v_1.Op != OpARM64MOVWUreg || x != v_1.Args[0] || !(c < 32 && t.Size() == 4) { - break - } - v.reset(OpARM64RORWconst) - v.AuxInt = int64ToAuxInt(c) - v.AddArg(x) - return true - } return false } func rewriteValueARM64_OpARM64XORshiftRO(v *Value) bool { diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index ae1985854f..3f33a0e6f8 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -648,9 +648,11 @@ func rewriteValuePPC64(v *Value) bool { case OpRotateLeft16: return rewriteValuePPC64_OpRotateLeft16(v) case OpRotateLeft32: - return rewriteValuePPC64_OpRotateLeft32(v) + v.Op = OpPPC64ROTLW + return true case OpRotateLeft64: - return rewriteValuePPC64_OpRotateLeft64(v) + v.Op = OpPPC64ROTL + return true case OpRotateLeft8: return rewriteValuePPC64_OpRotateLeft8(v) case OpRound: @@ -3898,8 +3900,6 @@ func rewriteValuePPC64_OpOffPtr(v *Value) bool { func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types // match: (ADD l:(MULLD x y) z) // cond: buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) // result: (MADDLD x y z) @@ -3921,236 +3921,6 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { } break } - // match: (ADD (SLDconst x [c]) (SRDconst x [d])) - // cond: d == 64-c - // result: (ROTLconst [c] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpPPC64SLDconst { - continue - } - c := auxIntToInt64(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpPPC64SRDconst { - continue - } - d := auxIntToInt64(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 64-c) { - continue - } - v.reset(OpPPC64ROTLconst) - v.AuxInt = int64ToAuxInt(c) - v.AddArg(x) - return true - } - break - } - // match: (ADD (SLWconst x [c]) (SRWconst x [d])) - // cond: d == 32-c - // result: (ROTLWconst [c] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpPPC64SLWconst { - continue - } - c := auxIntToInt64(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpPPC64SRWconst { - continue - } 
- d := auxIntToInt64(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 32-c) { - continue - } - v.reset(OpPPC64ROTLWconst) - v.AuxInt = int64ToAuxInt(c) - v.AddArg(x) - return true - } - break - } - // match: (ADD (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y))))) - // result: (ROTL x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpPPC64SLD { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSelect0 { - continue - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 63 { - continue - } - y := v_0_1_0.Args[0] - if v_1.Op != OpPPC64SRD { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt { - continue - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 64 { - continue - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpSelect0 || v_1_1_1.Type != typ.UInt { - continue - } - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_1_0.AuxInt) != 63 || y != v_1_1_1_0.Args[0] { - continue - } - v.reset(OpPPC64ROTL) - v.AddArg2(x, y) - return true - } - break - } - // match: (ADD (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y))))) - // result: (ROTL x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpPPC64SLD { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSelect0 { - continue - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 63 { - continue - } - y := v_0_1_0.Args[0] - if v_1.Op != OpPPC64SRD { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 64 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpSelect0 || v_1_1_0.Type != typ.UInt { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 63 || y != v_1_1_0_0.Args[0] { - continue - } - v.reset(OpPPC64ROTL) - v.AddArg2(x, y) - return true - } - break - } - // match: (ADD (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y))))) - // result: (ROTLW x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpPPC64SLW { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSelect0 { - continue - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 31 { - continue - } - y := v_0_1_0.Args[0] - if v_1.Op != OpPPC64SRW { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 32 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpSelect0 || v_1_1_0.Type != typ.UInt { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 31 || y != v_1_1_0_0.Args[0] { - continue - } - v.reset(OpPPC64ROTLW) - v.AddArg2(x, y) - return true - } - break - } - // match: (ADD (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUB (MOVDconst [32]) 
(Select0 (ANDCCconst [31] y))))) - // result: (ROTLW x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpPPC64SLW { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSelect0 { - continue - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 31 { - continue - } - y := v_0_1_0.Args[0] - if v_1.Op != OpPPC64SRW { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt { - continue - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 32 { - continue - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpSelect0 || v_1_1_1.Type != typ.UInt { - continue - } - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_1_0.AuxInt) != 31 || y != v_1_1_1_0.Args[0] { - continue - } - v.reset(OpPPC64ROTLW) - v.AddArg2(x, y) - return true - } - break - } // match: (ADD x (MOVDconst [c])) // cond: is32Bit(c) // result: (ADDconst [c] x) @@ -11642,236 +11412,6 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { b := v.Block config := b.Func.Config typ := &b.Func.Config.Types - // match: ( OR (SLDconst x [c]) (SRDconst x [d])) - // cond: d == 64-c - // result: (ROTLconst [c] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpPPC64SLDconst { - continue - } - c := auxIntToInt64(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpPPC64SRDconst { - continue - } - d := auxIntToInt64(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 64-c) { - continue - } - v.reset(OpPPC64ROTLconst) - v.AuxInt = int64ToAuxInt(c) - v.AddArg(x) - return true - } - break - } - // match: ( OR (SLWconst x [c]) (SRWconst x [d])) - // cond: d == 32-c - // result: (ROTLWconst [c] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpPPC64SLWconst { - continue - } - c := auxIntToInt64(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpPPC64SRWconst { - continue - } - d := auxIntToInt64(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 32-c) { - continue - } - v.reset(OpPPC64ROTLWconst) - v.AuxInt = int64ToAuxInt(c) - v.AddArg(x) - return true - } - break - } - // match: ( OR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y))))) - // result: (ROTL x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpPPC64SLD { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSelect0 { - continue - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 63 { - continue - } - y := v_0_1_0.Args[0] - if v_1.Op != OpPPC64SRD { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt { - continue - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 64 { - continue - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpSelect0 || v_1_1_1.Type != typ.UInt { - continue - } - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_1_0.AuxInt) != 63 || y != v_1_1_1_0.Args[0] { - continue - } - v.reset(OpPPC64ROTL) - v.AddArg2(x, y) - return true - } - break - } - // match: ( OR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUBFCconst 
[64] (Select0 (ANDCCconst [63] y))))) - // result: (ROTL x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpPPC64SLD { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSelect0 { - continue - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 63 { - continue - } - y := v_0_1_0.Args[0] - if v_1.Op != OpPPC64SRD { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 64 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpSelect0 || v_1_1_0.Type != typ.UInt { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 63 || y != v_1_1_0_0.Args[0] { - continue - } - v.reset(OpPPC64ROTL) - v.AddArg2(x, y) - return true - } - break - } - // match: ( OR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y))))) - // result: (ROTLW x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpPPC64SLW { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSelect0 { - continue - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 31 { - continue - } - y := v_0_1_0.Args[0] - if v_1.Op != OpPPC64SRW { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 32 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpSelect0 || v_1_1_0.Type != typ.UInt { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 31 || y != v_1_1_0_0.Args[0] { - continue - } - v.reset(OpPPC64ROTLW) - v.AddArg2(x, y) - return true - } - break - } - // match: ( OR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y))))) - // result: (ROTLW x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpPPC64SLW { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSelect0 { - continue - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 31 { - continue - } - y := v_0_1_0.Args[0] - if v_1.Op != OpPPC64SRW { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt { - continue - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 32 { - continue - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpSelect0 || v_1_1_1.Type != typ.UInt { - continue - } - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_1_0.AuxInt) != 31 || y != v_1_1_1_0.Args[0] { - continue - } - v.reset(OpPPC64ROTLW) - v.AddArg2(x, y) - return true - } - break - } // match: (OR x (NOR y y)) // result: (ORN x y) for { @@ -13950,238 +13490,6 @@ func rewriteValuePPC64_OpPPC64SUBFCconst(v *Value) bool { func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - b := v.Block - typ := &b.Func.Config.Types - // match: (XOR (SLDconst x [c]) (SRDconst x [d])) - // cond: d == 64-c - // 
result: (ROTLconst [c] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpPPC64SLDconst { - continue - } - c := auxIntToInt64(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpPPC64SRDconst { - continue - } - d := auxIntToInt64(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 64-c) { - continue - } - v.reset(OpPPC64ROTLconst) - v.AuxInt = int64ToAuxInt(c) - v.AddArg(x) - return true - } - break - } - // match: (XOR (SLWconst x [c]) (SRWconst x [d])) - // cond: d == 32-c - // result: (ROTLWconst [c] x) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpPPC64SLWconst { - continue - } - c := auxIntToInt64(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpPPC64SRWconst { - continue - } - d := auxIntToInt64(v_1.AuxInt) - if x != v_1.Args[0] || !(d == 32-c) { - continue - } - v.reset(OpPPC64ROTLWconst) - v.AuxInt = int64ToAuxInt(c) - v.AddArg(x) - return true - } - break - } - // match: (XOR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y))))) - // result: (ROTL x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpPPC64SLD { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSelect0 { - continue - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 63 { - continue - } - y := v_0_1_0.Args[0] - if v_1.Op != OpPPC64SRD { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt { - continue - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 64 { - continue - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpSelect0 || v_1_1_1.Type != typ.UInt { - continue - } - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_1_0.AuxInt) != 63 || y != v_1_1_1_0.Args[0] { - continue - } - v.reset(OpPPC64ROTL) - v.AddArg2(x, y) - return true - } - break - } - // match: (XOR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y))))) - // result: (ROTL x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpPPC64SLD { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSelect0 { - continue - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 63 { - continue - } - y := v_0_1_0.Args[0] - if v_1.Op != OpPPC64SRD { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 64 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpSelect0 || v_1_1_0.Type != typ.UInt { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 63 || y != v_1_1_0_0.Args[0] { - continue - } - v.reset(OpPPC64ROTL) - v.AddArg2(x, y) - return true - } - break - } - // match: (XOR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y))))) - // result: (ROTLW x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpPPC64SLW { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSelect0 { - continue - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op 
!= OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 31 { - continue - } - y := v_0_1_0.Args[0] - if v_1.Op != OpPPC64SRW { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 32 { - continue - } - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpSelect0 || v_1_1_0.Type != typ.UInt { - continue - } - v_1_1_0_0 := v_1_1_0.Args[0] - if v_1_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 31 || y != v_1_1_0_0.Args[0] { - continue - } - v.reset(OpPPC64ROTLW) - v.AddArg2(x, y) - return true - } - break - } - // match: (XOR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y))))) - // result: (ROTLW x y) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpPPC64SLW { - continue - } - _ = v_0.Args[1] - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpSelect0 { - continue - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 31 { - continue - } - y := v_0_1_0.Args[0] - if v_1.Op != OpPPC64SRW { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt { - continue - } - _ = v_1_1.Args[1] - v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 32 { - continue - } - v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpSelect0 || v_1_1_1.Type != typ.UInt { - continue - } - v_1_1_1_0 := v_1_1_1.Args[0] - if v_1_1_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_1_0.AuxInt) != 31 || y != v_1_1_1_0.Args[0] { - continue - } - v.reset(OpPPC64ROTLW) - v.AddArg2(x, y) - return true - } - break - } // match: (XOR (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [c^d]) for { @@ -14461,58 +13769,6 @@ func rewriteValuePPC64_OpRotateLeft16(v *Value) bool { } return false } -func rewriteValuePPC64_OpRotateLeft32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (RotateLeft32 x (MOVDconst [c])) - // result: (ROTLWconst [c&31] x) - for { - x := v_0 - if v_1.Op != OpPPC64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpPPC64ROTLWconst) - v.AuxInt = int64ToAuxInt(c & 31) - v.AddArg(x) - return true - } - // match: (RotateLeft32 x y) - // result: (ROTLW x y) - for { - x := v_0 - y := v_1 - v.reset(OpPPC64ROTLW) - v.AddArg2(x, y) - return true - } -} -func rewriteValuePPC64_OpRotateLeft64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (RotateLeft64 x (MOVDconst [c])) - // result: (ROTLconst [c&63] x) - for { - x := v_0 - if v_1.Op != OpPPC64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpPPC64ROTLconst) - v.AuxInt = int64ToAuxInt(c & 63) - v.AddArg(x) - return true - } - // match: (RotateLeft64 x y) - // result: (ROTL x y) - for { - x := v_0 - y := v_1 - v.reset(OpPPC64ROTL) - v.AddArg2(x, y) - return true - } -} func rewriteValuePPC64_OpRotateLeft8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index 0d63586149..08bbd43759 100644 --- a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -5280,25 +5280,6 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool { } break } - // match: (ADD (SLDconst x [c]) (SRDconst x [64-c])) - // result: (RISBGZ x {s390x.NewRotateParams(0, 
63, c)}) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpS390XSLDconst { - continue - } - c := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 64-c || x != v_1.Args[0] { - continue - } - v.reset(OpS390XRISBGZ) - v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(0, 63, c)) - v.AddArg(x) - return true - } - break - } // match: (ADD idx (MOVDaddr [c] {s} ptr)) // cond: ptr.Op != OpSB // result: (MOVDaddridx [c] {s} ptr idx) @@ -5473,25 +5454,6 @@ func rewriteValueS390X_OpS390XADDW(v *Value) bool { } break } - // match: (ADDW (SLWconst x [c]) (SRWconst x [32-c])) - // result: (RLLconst x [c]) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpS390XSLWconst { - continue - } - c := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpS390XSRWconst || auxIntToUint8(v_1.AuxInt) != 32-c || x != v_1.Args[0] { - continue - } - v.reset(OpS390XRLLconst) - v.AuxInt = uint8ToAuxInt(c) - v.AddArg(x) - return true - } - break - } // match: (ADDW x (NEGW y)) // result: (SUBW x y) for { @@ -11689,25 +11651,6 @@ func rewriteValueS390X_OpS390XOR(v *Value) bool { } break } - // match: (OR (SLDconst x [c]) (SRDconst x [64-c])) - // result: (RISBGZ x {s390x.NewRotateParams(0, 63, c)}) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpS390XSLDconst { - continue - } - c := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 64-c || x != v_1.Args[0] { - continue - } - v.reset(OpS390XRISBGZ) - v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(0, 63, c)) - v.AddArg(x) - return true - } - break - } // match: (OR (MOVDconst [-1<<63]) (LGDR x)) // result: (LGDR (LNDFR x)) for { @@ -12387,25 +12330,6 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { } break } - // match: (ORW (SLWconst x [c]) (SRWconst x [32-c])) - // result: (RLLconst x [c]) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpS390XSLWconst { - continue - } - c := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpS390XSRWconst || auxIntToUint8(v_1.AuxInt) != 32-c || x != v_1.Args[0] { - continue - } - v.reset(OpS390XRLLconst) - v.AuxInt = uint8ToAuxInt(c) - v.AddArg(x) - return true - } - break - } // match: (ORW x x) // result: x for { @@ -14972,25 +14896,6 @@ func rewriteValueS390X_OpS390XXOR(v *Value) bool { } break } - // match: (XOR (SLDconst x [c]) (SRDconst x [64-c])) - // result: (RISBGZ x {s390x.NewRotateParams(0, 63, c)}) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpS390XSLDconst { - continue - } - c := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 64-c || x != v_1.Args[0] { - continue - } - v.reset(OpS390XRISBGZ) - v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(0, 63, c)) - v.AddArg(x) - return true - } - break - } // match: (XOR (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [c^d]) for { @@ -15068,25 +14973,6 @@ func rewriteValueS390X_OpS390XXORW(v *Value) bool { } break } - // match: (XORW (SLWconst x [c]) (SRWconst x [32-c])) - // result: (RLLconst x [c]) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpS390XSLWconst { - continue - } - c := auxIntToUint8(v_0.AuxInt) - x := v_0.Args[0] - if v_1.Op != OpS390XSRWconst || auxIntToUint8(v_1.AuxInt) != 32-c || x != v_1.Args[0] { - continue - } - 
v.reset(OpS390XRLLconst) - v.AuxInt = uint8ToAuxInt(c) - v.AddArg(x) - return true - } - break - } // match: (XORW x x) // result: (MOVDconst [0]) for { diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go index 7d9656a4c8..848b0aa1e4 100644 --- a/src/cmd/compile/internal/ssa/rewritedec64.go +++ b/src/cmd/compile/internal/ssa/rewritedec64.go @@ -66,6 +66,14 @@ func rewriteValuedec64(v *Value) bool { return rewriteValuedec64_OpOr32(v) case OpOr64: return rewriteValuedec64_OpOr64(v) + case OpRotateLeft16: + return rewriteValuedec64_OpRotateLeft16(v) + case OpRotateLeft32: + return rewriteValuedec64_OpRotateLeft32(v) + case OpRotateLeft64: + return rewriteValuedec64_OpRotateLeft64(v) + case OpRotateLeft8: + return rewriteValuedec64_OpRotateLeft8(v) case OpRsh16Ux64: return rewriteValuedec64_OpRsh16Ux64(v) case OpRsh16x64: @@ -1266,6 +1274,74 @@ func rewriteValuedec64_OpOr64(v *Value) bool { return true } } +func rewriteValuedec64_OpRotateLeft16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (RotateLeft16 x (Int64Make hi lo)) + // result: (RotateLeft16 x lo) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v.reset(OpRotateLeft16) + v.AddArg2(x, lo) + return true + } + return false +} +func rewriteValuedec64_OpRotateLeft32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (RotateLeft32 x (Int64Make hi lo)) + // result: (RotateLeft32 x lo) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v.reset(OpRotateLeft32) + v.AddArg2(x, lo) + return true + } + return false +} +func rewriteValuedec64_OpRotateLeft64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (RotateLeft64 x (Int64Make hi lo)) + // result: (RotateLeft64 x lo) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v.reset(OpRotateLeft64) + v.AddArg2(x, lo) + return true + } + return false +} +func rewriteValuedec64_OpRotateLeft8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (RotateLeft8 x (Int64Make hi lo)) + // result: (RotateLeft8 x lo) + for { + x := v_0 + if v_1.Op != OpInt64Make { + break + } + lo := v_1.Args[1] + v.reset(OpRotateLeft8) + v.AddArg2(x, lo) + return true + } + return false +} func rewriteValuedec64_OpRsh16Ux64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 99346edd4b..971d369a09 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -453,6 +453,7 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + config := b.Func.Config // match: (Add16 (Const16 [c]) (Const16 [d])) // result: (Const16 [c+d]) for { @@ -724,12 +725,321 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { } break } + // match: (Add16 (Lsh16x64 x z:(Const64 [c])) (Rsh16Ux64 x (Const64 [d]))) + // cond: c < 16 && d == 16-c && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLsh16x64 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { + continue + } + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh16Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + 
if !(c < 16 && d == 16-c && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add16 left:(Lsh16x64 x y) right:(Rsh16Ux64 x (Sub64 (Const64 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add16 left:(Lsh16x32 x y) right:(Rsh16Ux32 x (Sub32 (Const32 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x32 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add16 left:(Lsh16x16 x y) right:(Rsh16Ux16 x (Sub16 (Const16 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x16 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add16 left:(Lsh16x8 x y) right:(Rsh16Ux8 x (Sub8 (Const8 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 
16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add16 right:(Rsh16Ux64 x y) left:(Lsh16x64 x z:(Sub64 (Const64 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add16 right:(Rsh16Ux32 x y) left:(Lsh16x32 x z:(Sub32 (Const32 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add16 right:(Rsh16Ux16 x y) left:(Lsh16x16 x z:(Sub16 (Const16 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux16 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add16 right:(Rsh16Ux8 x y) left:(Lsh16x8 x z:(Sub8 (Const8 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux8 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x8 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } return false } func rewriteValuegeneric_OpAdd32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + config := b.Func.Config 
// match: (Add32 (Const32 [c]) (Const32 [d])) // result: (Const32 [c+d]) for { @@ -1001,104 +1311,413 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { } break } - return false -} -func rewriteValuegeneric_OpAdd32F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Add32F (Const32F [c]) (Const32F [d])) - // cond: c+d == c+d - // result: (Const32F [c+d]) + // match: (Add32 (Lsh32x64 x z:(Const64 [c])) (Rsh32Ux64 x (Const64 [d]))) + // cond: c < 32 && d == 32-c && canRotate(config, 32) + // result: (RotateLeft32 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst32F { + if v_0.Op != OpLsh32x64 { continue } - c := auxIntToFloat32(v_0.AuxInt) - if v_1.Op != OpConst32F { + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { continue } - d := auxIntToFloat32(v_1.AuxInt) - if !(c+d == c+d) { + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh32Ux64 { continue } - v.reset(OpConst32F) - v.AuxInt = float32ToAuxInt(c + d) + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 32 && d == 32-c && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) return true } break } - return false -} -func rewriteValuegeneric_OpAdd64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Add64 (Const64 [c]) (Const64 [d])) - // result: (Const64 [c+d]) + // match: (Add32 left:(Lsh32x64 x y) right:(Rsh32Ux64 x (Sub64 (Const64 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst64 { + left := v_0 + if left.Op != OpLsh32x64 { continue } - c := auxIntToInt64(v_0.AuxInt) - if v_1.Op != OpConst64 { + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux64 { continue } - d := auxIntToInt64(v_1.AuxInt) - v.reset(OpConst64) - v.AuxInt = int64ToAuxInt(c + d) + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) return true } break } - // match: (Add64 (Mul64 x y) (Mul64 x z)) - // result: (Mul64 x (Add64 y z)) + // match: (Add32 left:(Lsh32x32 x y) right:(Rsh32Ux32 x (Sub32 (Const32 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) for { - t := v.Type for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpMul64 { + left := v_0 + if left.Op != OpLsh32x32 { continue } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { - x := v_0_0 - y := v_0_1 - if v_1.Op != OpMul64 { - continue - } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 { - if x != v_1_0 { - continue - } - z := v_1_1 - v.reset(OpMul64) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg2(y, z) - v.AddArg2(x, v0) - return true - } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux32 { + 
continue } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true } break } - // match: (Add64 (Const64 [0]) x) - // result: x + // match: (Add32 left:(Lsh32x16 x y) right:(Rsh32Ux16 x (Sub16 (Const16 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + left := v_0 + if left.Op != OpLsh32x16 { continue } - x := v_1 - v.copyOf(x) - return true - } - break + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add32 left:(Lsh32x8 x y) right:(Rsh32Ux8 x (Sub8 (Const8 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh32x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add32 right:(Rsh32Ux64 x y) left:(Lsh32x64 x z:(Sub64 (Const64 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh32Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add32 right:(Rsh32Ux32 x y) left:(Lsh32x32 x z:(Sub32 (Const32 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh32Ux32 { + continue + } + y := right.Args[1] + x := 
right.Args[0] + left := v_1 + if left.Op != OpLsh32x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add32 right:(Rsh32Ux16 x y) left:(Lsh32x16 x z:(Sub16 (Const16 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh32Ux16 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add32 right:(Rsh32Ux8 x y) left:(Lsh32x8 x z:(Sub8 (Const8 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh32Ux8 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x8 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpAdd32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Add32F (Const32F [c]) (Const32F [d])) + // cond: c+d == c+d + // result: (Const32F [c+d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32F { + continue + } + c := auxIntToFloat32(v_0.AuxInt) + if v_1.Op != OpConst32F { + continue + } + d := auxIntToFloat32(v_1.AuxInt) + if !(c+d == c+d) { + continue + } + v.reset(OpConst32F) + v.AuxInt = float32ToAuxInt(c + d) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpAdd64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Add64 (Const64 [c]) (Const64 [d])) + // result: (Const64 [c+d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(c + d) + return true + } + break + } + // match: (Add64 (Mul64 x y) (Mul64 x z)) + // result: (Mul64 x (Add64 y z)) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMul64 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := 
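// Illustrative sketch, not part of the generated matcher above: the kind of
// source the new constant-count rules are aimed at. The Add32 form shown here,
//   (Add32 (Lsh32x64 x z:(Const64 [c])) (Rsh32Ux64 x (Const64 [d])))
//     && c < 32 && d == 32-c && canRotate(config, 32),
// has an identical Or32 form later in this file, which is what the | spelling
// below hits. Package and function names are made up for the example.
package main

import "fmt"

func rotl32by8(x uint32) uint32 {
	return x<<8 | x>>24 // constant counts 8 and 24 == 32-8; one RotateLeft32 where the target has a 32-bit rotate
}

func main() {
	fmt.Printf("%#08x\n", rotl32by8(0x12345678)) // prints 0x34567812
}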
v_0_0 + y := v_0_1 + if v_1.Op != OpMul64 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + z := v_1_1 + v.reset(OpMul64) + v0 := b.NewValue0(v.Pos, OpAdd64, t) + v0.AddArg2(y, z) + v.AddArg2(x, v0) + return true + } + } + } + break + } + // match: (Add64 (Const64 [0]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break } // match: (Add64 x (Neg64 y)) // result: (Sub64 x y) @@ -1305,73 +1924,382 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { } break } - return false -} -func rewriteValuegeneric_OpAdd64F(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (Add64F (Const64F [c]) (Const64F [d])) - // cond: c+d == c+d - // result: (Const64F [c+d]) + // match: (Add64 (Lsh64x64 x z:(Const64 [c])) (Rsh64Ux64 x (Const64 [d]))) + // cond: c < 64 && d == 64-c && canRotate(config, 64) + // result: (RotateLeft64 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst64F { + if v_0.Op != OpLsh64x64 { continue } - c := auxIntToFloat64(v_0.AuxInt) - if v_1.Op != OpConst64F { + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { continue } - d := auxIntToFloat64(v_1.AuxInt) - if !(c+d == c+d) { + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh64Ux64 { continue } - v.reset(OpConst64F) - v.AuxInt = float64ToAuxInt(c + d) + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 64 && d == 64-c && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) return true } break } - return false -} -func rewriteValuegeneric_OpAdd8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Add8 (Const8 [c]) (Const8 [d])) - // result: (Const8 [c+d]) + // match: (Add64 left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst8 { + left := v_0 + if left.Op != OpLsh64x64 { continue } - c := auxIntToInt8(v_0.AuxInt) - if v_1.Op != OpConst8 { + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux64 { continue } - d := auxIntToInt8(v_1.AuxInt) - v.reset(OpConst8) - v.AuxInt = int8ToAuxInt(c + d) + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) return true } break } - // match: (Add8 (Mul8 x y) (Mul8 x z)) - // result: (Mul8 x (Add8 y z)) + // match: (Add64 left:(Lsh64x32 x y) right:(Rsh64Ux32 x (Sub32 (Const32 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) for { - t := v.Type for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpMul8 { + left := v_0 + if left.Op != OpLsh64x32 { continue } - 
_ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { - x := v_0_0 - y := v_0_1 - if v_1.Op != OpMul8 { - continue + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add64 left:(Lsh64x16 x y) right:(Rsh64Ux16 x (Sub16 (Const16 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh64x16 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add64 left:(Lsh64x8 x y) right:(Rsh64Ux8 x (Sub8 (Const8 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh64x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add64 right:(Rsh64Ux64 x y) left:(Lsh64x64 x z:(Sub64 (Const64 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh64Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add64 right:(Rsh64Ux32 x y) left:(Lsh64x32 x z:(Sub32 (Const32 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 
<= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh64Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add64 right:(Rsh64Ux16 x y) left:(Lsh64x16 x z:(Sub16 (Const16 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh64Ux16 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add64 right:(Rsh64Ux8 x y) left:(Lsh64x8 x z:(Sub8 (Const8 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh64Ux8 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x8 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpAdd64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Add64F (Const64F [c]) (Const64F [d])) + // cond: c+d == c+d + // result: (Const64F [c+d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst64F { + continue + } + c := auxIntToFloat64(v_0.AuxInt) + if v_1.Op != OpConst64F { + continue + } + d := auxIntToFloat64(v_1.AuxInt) + if !(c+d == c+d) { + continue + } + v.reset(OpConst64F) + v.AuxInt = float64ToAuxInt(c + d) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpAdd8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Add8 (Const8 [c]) (Const8 [d])) + // result: (Const8 [c+d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1.AuxInt) + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(c + d) + return true + } + break + } + // match: (Add8 (Mul8 x y) (Mul8 x z)) + // result: (Mul8 x (Add8 y z)) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpMul8 { + continue + } + _ = v_0.Args[1] + v_0_0 
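// Illustrative sketch (hypothetical package and function names): the variable
// count form targeted by the
//   (Add64 left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y)))
// rules above. Masking the count to 0..63 is what typically lets the compiler
// mark the left shift as bounded (shiftIsBounded), which the condition
// requires; the 64-k right shift stays well defined in Go when k is 0,
// because x>>64 evaluates to 0.
package rotdemo

func rotl64(x uint64, k uint) uint64 {
	k &= 63                 // bounded shift count
	return x<<k | x>>(64-k) // collapses to RotateLeft64 where canRotate(config, 64) holds
}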
:= v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + y := v_0_1 + if v_1.Op != OpMul8 { + continue } _ = v_1.Args[1] v_1_0 := v_1.Args[0] @@ -1609,98 +2537,406 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { } break } - return false -} -func rewriteValuegeneric_OpAddPtr(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (AddPtr x (Const64 [c])) - // result: (OffPtr x [c]) - for { - t := v.Type - x := v_0 - if v_1.Op != OpConst64 { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpOffPtr) - v.Type = t - v.AuxInt = int64ToAuxInt(c) - v.AddArg(x) - return true - } - // match: (AddPtr x (Const32 [c])) - // result: (OffPtr x [int64(c)]) - for { - t := v.Type - x := v_0 - if v_1.Op != OpConst32 { - break - } - c := auxIntToInt32(v_1.AuxInt) - v.reset(OpOffPtr) - v.Type = t - v.AuxInt = int64ToAuxInt(int64(c)) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpAnd16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (And16 (Const16 [c]) (Const16 [d])) - // result: (Const16 [c&d]) + // match: (Add8 (Lsh8x64 x z:(Const64 [c])) (Rsh8Ux64 x (Const64 [d]))) + // cond: c < 8 && d == 8-c && canRotate(config, 8) + // result: (RotateLeft8 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst16 { + if v_0.Op != OpLsh8x64 { continue } - c := auxIntToInt16(v_0.AuxInt) - if v_1.Op != OpConst16 { + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { continue } - d := auxIntToInt16(v_1.AuxInt) - v.reset(OpConst16) - v.AuxInt = int16ToAuxInt(c & d) - return true - } - break - } - // match: (And16 (Const16 [m]) (Rsh16Ux64 _ (Const64 [c]))) - // cond: c >= int64(16-ntz16(m)) - // result: (Const16 [0]) - for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst16 { + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh8Ux64 { continue } - m := auxIntToInt16(v_0.AuxInt) - if v_1.Op != OpRsh16Ux64 { + _ = v_1.Args[1] + if x != v_1.Args[0] { continue } - _ = v_1.Args[1] v_1_1 := v_1.Args[1] if v_1_1.Op != OpConst64 { continue } - c := auxIntToInt64(v_1_1.AuxInt) - if !(c >= int64(16-ntz16(m))) { + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 8 && d == 8-c && canRotate(config, 8)) { continue } - v.reset(OpConst16) - v.AuxInt = int16ToAuxInt(0) + v.reset(OpRotateLeft8) + v.AddArg2(x, z) return true } break } - // match: (And16 (Const16 [m]) (Lsh16x64 _ (Const64 [c]))) - // cond: c >= int64(16-nlz16(m)) - // result: (Const16 [0]) + // match: (Add8 left:(Lsh8x64 x y) right:(Rsh8Ux64 x (Sub64 (Const64 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst16 { + left := v_0 + if left.Op != OpLsh8x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add8 left:(Lsh8x32 x y) right:(Rsh8Ux32 x (Sub32 (Const32 [8]) 
y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x32 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add8 left:(Lsh8x16 x y) right:(Rsh8Ux16 x (Sub16 (Const16 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x16 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add8 left:(Lsh8x8 x y) right:(Rsh8Ux8 x (Sub8 (Const8 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Add8 right:(Rsh8Ux64 x y) left:(Lsh8x64 x z:(Sub64 (Const64 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add8 right:(Rsh8Ux32 x y) left:(Lsh8x32 x z:(Sub32 (Const32 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 
:= 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add8 right:(Rsh8Ux16 x y) left:(Lsh8x16 x z:(Sub16 (Const16 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux16 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Add8 right:(Rsh8Ux8 x y) left:(Lsh8x8 x z:(Sub8 (Const8 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux8 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x8 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpAddPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AddPtr x (Const64 [c])) + // result: (OffPtr x [c]) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOffPtr) + v.Type = t + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (AddPtr x (Const32 [c])) + // result: (OffPtr x [int64(c)]) + for { + t := v.Type + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + v.reset(OpOffPtr) + v.Type = t + v.AuxInt = int64ToAuxInt(int64(c)) + v.AddArg(x) + return true + } + return false +} +func rewriteValuegeneric_OpAnd16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (And16 (Const16 [c]) (Const16 [d])) + // result: (Const16 [c&d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1.AuxInt) + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(c & d) + return true + } + break + } + // match: (And16 (Const16 [m]) (Rsh16Ux64 _ (Const64 [c]))) + // cond: c >= int64(16-ntz16(m)) + // result: (Const16 [0]) + for { + for _i0 := 0; _i0 <= 1; 
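// Illustrative sketch (made-up names): the matchers above are generated for
// either placement of the width-minus-count subtraction, for either textual
// operand order, and for shift counts of every integer width (the
// x64/x32/x16/x8 variants), so an 8-bit form written with the right shift
// first and an 8-bit count is detected too. The + spelling keeps this inside
// the Add8 rules shown above; the shifted halves never overlap, so + and |
// give the same value.
package rotdemo

func rotl8(x uint8, k uint8) uint8 {
	k &= 7
	return x>>(8-k) + x<<k // RotateLeft8 on targets with an 8-bit rotate
}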
_i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + m := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpRsh16Ux64 { + continue + } + _ = v_1.Args[1] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c >= int64(16-ntz16(m))) { + continue + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + break + } + // match: (And16 (Const16 [m]) (Lsh16x64 _ (Const64 [c]))) + // cond: c >= int64(16-nlz16(m)) + // result: (Const16 [0]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { continue } m := auxIntToInt16(v_0.AuxInt) @@ -17106,6 +18342,7 @@ func rewriteValuegeneric_OpOr16(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + config := b.Func.Config // match: (Or16 (Const16 [c]) (Const16 [d])) // result: (Const16 [c|d]) for { @@ -17295,32 +18532,341 @@ func rewriteValuegeneric_OpOr16(v *Value) bool { } break } - return false -} -func rewriteValuegeneric_OpOr32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Or32 (Const32 [c]) (Const32 [d])) - // result: (Const32 [c|d]) + // match: (Or16 (Lsh16x64 x z:(Const64 [c])) (Rsh16Ux64 x (Const64 [d]))) + // cond: c < 16 && d == 16-c && canRotate(config, 16) + // result: (RotateLeft16 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst32 { + if v_0.Op != OpLsh16x64 { continue } - c := auxIntToInt32(v_0.AuxInt) - if v_1.Op != OpConst32 { + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { continue } - d := auxIntToInt32(v_1.AuxInt) - v.reset(OpConst32) - v.AuxInt = int32ToAuxInt(c | d) + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh16Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 16 && d == 16-c && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) return true } break } - // match: (Or32 x x) - // result: x + // match: (Or16 left:(Lsh16x64 x y) right:(Rsh16Ux64 x (Sub64 (Const64 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or16 left:(Lsh16x32 x y) right:(Rsh16Ux32 x (Sub32 (Const32 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x32 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = 
right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or16 left:(Lsh16x16 x y) right:(Rsh16Ux16 x (Sub16 (Const16 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x16 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or16 left:(Lsh16x8 x y) right:(Rsh16Ux8 x (Sub8 (Const8 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or16 right:(Rsh16Ux64 x y) left:(Lsh16x64 x z:(Sub64 (Const64 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or16 right:(Rsh16Ux32 x y) left:(Lsh16x32 x z:(Sub32 (Const32 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 16 || y != z.Args[1] || 
!((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or16 right:(Rsh16Ux16 x y) left:(Lsh16x16 x z:(Sub16 (Const16 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux16 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or16 right:(Rsh16Ux8 x y) left:(Lsh16x8 x z:(Sub8 (Const8 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux8 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x8 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpOr32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Or32 (Const32 [c]) (Const32 [d])) + // result: (Const32 [c|d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(c | d) + return true + } + break + } + // match: (Or32 x x) + // result: x for { x := v_0 if x != v_1 { @@ -17490,226 +19036,340 @@ func rewriteValuegeneric_OpOr32(v *Value) bool { } break } - return false -} -func rewriteValuegeneric_OpOr64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Or64 (Const64 [c]) (Const64 [d])) - // result: (Const64 [c|d]) + // match: (Or32 (Lsh32x64 x z:(Const64 [c])) (Rsh32Ux64 x (Const64 [d]))) + // cond: c < 32 && d == 32-c && canRotate(config, 32) + // result: (RotateLeft32 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst64 { + if v_0.Op != OpLsh32x64 { continue } - c := auxIntToInt64(v_0.AuxInt) - if v_1.Op != OpConst64 { + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { continue } - d := auxIntToInt64(v_1.AuxInt) - v.reset(OpConst64) - v.AuxInt = int64ToAuxInt(c | d) + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh32Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 32 && d == 32-c && canRotate(config, 32)) { + continue + } + 
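// Illustrative sketch (hypothetical names): the same detection is generated
// for Or as for Add, so the usual | spelling of a constant 16-bit rotate is
// matched by the Or16 rule above and becomes RotateLeft16 when
// canRotate(config, 16) holds for the target.
package rotdemo

func rotl16by3(x uint16) uint16 {
	return x<<3 | x>>13 // 13 == 16-3
}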
v.reset(OpRotateLeft32) + v.AddArg2(x, z) return true } break } - // match: (Or64 x x) - // result: x + // match: (Or32 left:(Lsh32x64 x y) right:(Rsh32Ux64 x (Sub64 (Const64 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) for { - x := v_0 - if x != v_1 { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh32x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true } - v.copyOf(x) - return true + break } - // match: (Or64 (Const64 [0]) x) - // result: x + // match: (Or32 left:(Lsh32x32 x y) right:(Rsh32Ux32 x (Sub32 (Const32 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + left := v_0 + if left.Op != OpLsh32x32 { continue } - x := v_1 - v.copyOf(x) + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) return true } break } - // match: (Or64 (Const64 [-1]) _) - // result: (Const64 [-1]) + // match: (Or32 left:(Lsh32x16 x y) right:(Rsh32Ux16 x (Sub16 (Const16 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 { + left := v_0 + if left.Op != OpLsh32x16 { continue } - v.reset(OpConst64) - v.AuxInt = int64ToAuxInt(-1) + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) return true } break } - // match: (Or64 (Com64 x) x) - // result: (Const64 [-1]) + // match: (Or32 left:(Lsh32x8 x y) right:(Rsh32Ux8 x (Sub8 (Const8 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpCom64 { + left := v_0 + if left.Op != OpLsh32x8 { continue } - x := v_0.Args[0] - if x != v_1 { + y := left.Args[1] + x := left.Args[0] + right 
:= v_1 + if right.Op != OpRsh32Ux8 { continue } - v.reset(OpConst64) - v.AuxInt = int64ToAuxInt(-1) + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) return true } break } - // match: (Or64 x (Or64 x y)) - // result: (Or64 x y) + // match: (Or32 right:(Rsh32Ux64 x y) left:(Lsh32x64 x z:(Sub64 (Const64 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpOr64 { + right := v_0 + if right.Op != OpRsh32Ux64 { continue } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if x != v_1_0 { - continue - } - y := v_1_1 - v.reset(OpOr64) - v.AddArg2(x, y) - return true + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true } break } - // match: (Or64 (And64 x (Const64 [c2])) (Const64 [c1])) - // cond: ^(c1 | c2) == 0 - // result: (Or64 (Const64 [c1]) x) + // match: (Or32 right:(Rsh32Ux32 x y) left:(Lsh32x32 x z:(Sub32 (Const32 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAnd64 { + right := v_0 + if right.Op != OpRsh32Ux32 { continue } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { - x := v_0_0 - if v_0_1.Op != OpConst64 { - continue - } - c2 := auxIntToInt64(v_0_1.AuxInt) - if v_1.Op != OpConst64 { - continue - } - t := v_1.Type - c1 := auxIntToInt64(v_1.AuxInt) - if !(^(c1 | c2) == 0) { - continue - } - v.reset(OpOr64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = int64ToAuxInt(c1) - v.AddArg2(v0, x) - return true + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true } break } - // match: (Or64 (Or64 i:(Const64 ) z) x) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Or64 i (Or64 z x)) + // match: (Or32 right:(Rsh32Ux16 x y) left:(Lsh32x16 x z:(Sub16 (Const16 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpOr64 { + right := v_0 + 
if right.Op != OpRsh32Ux16 { continue } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { - i := v_0_0 - if i.Op != OpConst64 { - continue - } - t := i.Type - z := v_0_1 - x := v_1 - if !(z.Op != OpConst64 && x.Op != OpConst64) { - continue - } - v.reset(OpOr64) - v0 := b.NewValue0(v.Pos, OpOr64, t) - v0.AddArg2(z, x) - v.AddArg2(i, v0) - return true + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true } break } - // match: (Or64 (Const64 [c]) (Or64 (Const64 [d]) x)) - // result: (Or64 (Const64 [c|d]) x) + // match: (Or32 right:(Rsh32Ux8 x y) left:(Lsh32x8 x z:(Sub8 (Const8 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst64 { + right := v_0 + if right.Op != OpRsh32Ux8 { continue } - t := v_0.Type - c := auxIntToInt64(v_0.AuxInt) - if v_1.Op != OpOr64 { + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x8 { continue } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst64 || v_1_0.Type != t { - continue - } - d := auxIntToInt64(v_1_0.AuxInt) - x := v_1_1 - v.reset(OpOr64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = int64ToAuxInt(c | d) - v.AddArg2(v0, x) - return true + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true } break } return false } -func rewriteValuegeneric_OpOr8(v *Value) bool { +func rewriteValuegeneric_OpOr64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Or8 (Const8 [c]) (Const8 [d])) - // result: (Const8 [c|d]) + config := b.Func.Config + // match: (Or64 (Const64 [c]) (Const64 [d])) + // result: (Const64 [c|d]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst8 { + if v_0.Op != OpConst64 { continue } - c := auxIntToInt8(v_0.AuxInt) - if v_1.Op != OpConst8 { + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { continue } - d := auxIntToInt8(v_1.AuxInt) - v.reset(OpConst8) - v.AuxInt = int8ToAuxInt(c | d) + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(c | d) return true } break } - // match: (Or8 x x) + // match: (Or64 x x) // result: x for { x := v_0 @@ -17719,11 +19379,11 @@ func rewriteValuegeneric_OpOr8(v *Value) bool { v.copyOf(x) return true } - // match: (Or8 (Const8 [0]) x) + // match: (Or64 (Const64 [0]) x) // result: x for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { continue } x := v_1 @@ -17732,42 
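// Illustrative only: one quick way to confirm the rewrite fired on a given
// target is to inspect the assembly with go build -gcflags=-S and look for
// the rotate instruction, or to add a codegen-style test. The annotation
// below follows the general shape used under test/codegen; treat it as a
// sketch rather than a copy of that directory's contents.
package rotdemo

func rotl32by7(x uint32) uint32 {
	// amd64:"ROLL"
	return x<<7 | x>>25
}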
+19392,42 @@ func rewriteValuegeneric_OpOr8(v *Value) bool { } break } - // match: (Or8 (Const8 [-1]) _) - // result: (Const8 [-1]) + // match: (Or64 (Const64 [-1]) _) + // result: (Const64 [-1]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 { continue } - v.reset(OpConst8) - v.AuxInt = int8ToAuxInt(-1) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(-1) return true } break } - // match: (Or8 (Com8 x) x) - // result: (Const8 [-1]) + // match: (Or64 (Com64 x) x) + // result: (Const64 [-1]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpCom8 { + if v_0.Op != OpCom64 { continue } x := v_0.Args[0] if x != v_1 { continue } - v.reset(OpConst8) - v.AuxInt = int8ToAuxInt(-1) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(-1) return true } break } - // match: (Or8 x (Or8 x y)) - // result: (Or8 x y) + // match: (Or64 x (Or64 x y)) + // result: (Or64 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpOr8 { + if v_1.Op != OpOr64 { continue } _ = v_1.Args[1] @@ -17778,19 +19438,19 @@ func rewriteValuegeneric_OpOr8(v *Value) bool { continue } y := v_1_1 - v.reset(OpOr8) + v.reset(OpOr64) v.AddArg2(x, y) return true } } break } - // match: (Or8 (And8 x (Const8 [c2])) (Const8 [c1])) + // match: (Or64 (And64 x (Const64 [c2])) (Const64 [c1])) // cond: ^(c1 | c2) == 0 - // result: (Or8 (Const8 [c1]) x) + // result: (Or64 (Const64 [c1]) x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpAnd8 { + if v_0.Op != OpAnd64 { continue } _ = v_0.Args[1] @@ -17798,33 +19458,33 @@ func rewriteValuegeneric_OpOr8(v *Value) bool { v_0_1 := v_0.Args[1] for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { x := v_0_0 - if v_0_1.Op != OpConst8 { + if v_0_1.Op != OpConst64 { continue } - c2 := auxIntToInt8(v_0_1.AuxInt) - if v_1.Op != OpConst8 { + c2 := auxIntToInt64(v_0_1.AuxInt) + if v_1.Op != OpConst64 { continue } t := v_1.Type - c1 := auxIntToInt8(v_1.AuxInt) + c1 := auxIntToInt64(v_1.AuxInt) if !(^(c1 | c2) == 0) { continue } - v.reset(OpOr8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int8ToAuxInt(c1) + v.reset(OpOr64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c1) v.AddArg2(v0, x) return true } } break } - // match: (Or8 (Or8 i:(Const8 ) z) x) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Or8 i (Or8 z x)) + // match: (Or64 (Or64 i:(Const64 ) z) x) + // cond: (z.Op != OpConst64 && x.Op != OpConst64) + // result: (Or64 i (Or64 z x)) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpOr8 { + if v_0.Op != OpOr64 { continue } _ = v_0.Args[1] @@ -17832,17 +19492,17 @@ func rewriteValuegeneric_OpOr8(v *Value) bool { v_0_1 := v_0.Args[1] for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { i := v_0_0 - if i.Op != OpConst8 { + if i.Op != OpConst64 { continue } t := i.Type z := v_0_1 x := v_1 - if !(z.Op != OpConst8 && x.Op != OpConst8) { + if !(z.Op != OpConst64 && x.Op != OpConst64) { continue } - v.reset(OpOr8) - v0 := b.NewValue0(v.Pos, OpOr8, t) + v.reset(OpOr64) + v0 := b.NewValue0(v.Pos, OpOr64, t) v0.AddArg2(z, x) v.AddArg2(i, v0) return true @@ -17850,57 +19510,52 @@ func rewriteValuegeneric_OpOr8(v *Value) bool { } break } - // match: (Or8 (Const8 [c]) (Or8 (Const8 [d]) x)) - // result: (Or8 (Const8 [c|d]) x) + // match: (Or64 (Const64 [c]) (Or64 (Const64 [d]) x)) + 
// result: (Or64 (Const64 [c|d]) x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst8 { + if v_0.Op != OpConst64 { continue } t := v_0.Type - c := auxIntToInt8(v_0.AuxInt) - if v_1.Op != OpOr8 { + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpOr64 { continue } _ = v_1.Args[1] v_1_0 := v_1.Args[0] v_1_1 := v_1.Args[1] for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst8 || v_1_0.Type != t { + if v_1_0.Op != OpConst64 || v_1_0.Type != t { continue } - d := auxIntToInt8(v_1_0.AuxInt) + d := auxIntToInt64(v_1_0.AuxInt) x := v_1_1 - v.reset(OpOr8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int8ToAuxInt(c | d) + v.reset(OpOr64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c | d) v.AddArg2(v0, x) return true } } break } - return false -} -func rewriteValuegeneric_OpOrB(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (OrB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d]))) - // cond: c >= d - // result: (Less64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) + // match: (Or64 (Lsh64x64 x z:(Const64 [c])) (Rsh64Ux64 x (Const64 [d]))) + // cond: c < 64 && d == 64-c && canRotate(config, 64) + // result: (RotateLeft64 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLess64 { + if v_0.Op != OpLsh64x64 { continue } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { continue } - c := auxIntToInt64(v_0_0.AuxInt) - if v_1.Op != OpLess64 { + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh64Ux64 { continue } _ = v_1.Args[1] @@ -17912,528 +19567,499 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } d := auxIntToInt64(v_1_1.AuxInt) - if !(c >= d) { + if !(c < 64 && d == 64-c && canRotate(config, 64)) { continue } - v.reset(OpLess64U) - v0 := b.NewValue0(v.Pos, OpConst64, x.Type) - v0.AuxInt = int64ToAuxInt(c - d) - v1 := b.NewValue0(v.Pos, OpSub64, x.Type) - v2 := b.NewValue0(v.Pos, OpConst64, x.Type) - v2.AuxInt = int64ToAuxInt(d) - v1.AddArg2(x, v2) - v.AddArg2(v0, v1) + v.reset(OpRotateLeft64) + v.AddArg2(x, z) return true } break } - // match: (OrB (Leq64 (Const64 [c]) x) (Less64 x (Const64 [d]))) - // cond: c >= d - // result: (Leq64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) + // match: (Or64 left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLeq64 { + left := v_0 + if left.Op != OpLsh64x64 { continue } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux64 { continue } - c := auxIntToInt64(v_0_0.AuxInt) - if v_1.Op != OpLess64 { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { + _ = right.Args[1] + if x != right.Args[0] { continue } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { + right_1 := right.Args[1] + if right_1.Op != OpSub64 { continue } - d := auxIntToInt64(v_1_1.AuxInt) - if !(c >= d) { + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { continue } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpConst64, x.Type) - 
v0.AuxInt = int64ToAuxInt(c - d) - v1 := b.NewValue0(v.Pos, OpSub64, x.Type) - v2 := b.NewValue0(v.Pos, OpConst64, x.Type) - v2.AuxInt = int64ToAuxInt(d) - v1.AddArg2(x, v2) - v.AddArg2(v0, v1) + v.reset(OpRotateLeft64) + v.AddArg2(x, y) return true } break } - // match: (OrB (Less32 (Const32 [c]) x) (Less32 x (Const32 [d]))) - // cond: c >= d - // result: (Less32U (Const32 [c-d]) (Sub32 x (Const32 [d]))) + // match: (Or64 left:(Lsh64x32 x y) right:(Rsh64Ux32 x (Sub32 (Const32 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLess32 { - continue - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { + left := v_0 + if left.Op != OpLsh64x32 { continue } - c := auxIntToInt32(v_0_0.AuxInt) - if v_1.Op != OpLess32 { + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux32 { continue } - _ = v_1.Args[1] - if x != v_1.Args[0] { + _ = right.Args[1] + if x != right.Args[0] { continue } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { + right_1 := right.Args[1] + if right_1.Op != OpSub32 { continue } - d := auxIntToInt32(v_1_1.AuxInt) - if !(c >= d) { + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { continue } - v.reset(OpLess32U) - v0 := b.NewValue0(v.Pos, OpConst32, x.Type) - v0.AuxInt = int32ToAuxInt(c - d) - v1 := b.NewValue0(v.Pos, OpSub32, x.Type) - v2 := b.NewValue0(v.Pos, OpConst32, x.Type) - v2.AuxInt = int32ToAuxInt(d) - v1.AddArg2(x, v2) - v.AddArg2(v0, v1) + v.reset(OpRotateLeft64) + v.AddArg2(x, y) return true } break } - // match: (OrB (Leq32 (Const32 [c]) x) (Less32 x (Const32 [d]))) - // cond: c >= d - // result: (Leq32U (Const32 [c-d]) (Sub32 x (Const32 [d]))) + // match: (Or64 left:(Lsh64x16 x y) right:(Rsh64Ux16 x (Sub16 (Const16 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLeq32 { + left := v_0 + if left.Op != OpLsh64x16 { continue } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - continue - } - c := auxIntToInt32(v_0_0.AuxInt) - if v_1.Op != OpLess32 { + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux16 { continue } - _ = v_1.Args[1] - if x != v_1.Args[0] { + _ = right.Args[1] + if x != right.Args[0] { continue } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { + right_1 := right.Args[1] + if right_1.Op != OpSub16 { continue } - d := auxIntToInt32(v_1_1.AuxInt) - if !(c >= d) { + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { continue } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpConst32, x.Type) - v0.AuxInt = int32ToAuxInt(c - d) - v1 := b.NewValue0(v.Pos, OpSub32, x.Type) - v2 := b.NewValue0(v.Pos, OpConst32, x.Type) - v2.AuxInt = int32ToAuxInt(d) - v1.AddArg2(x, v2) - v.AddArg2(v0, v1) + v.reset(OpRotateLeft64) + v.AddArg2(x, y) return true } break } - // match: (OrB (Less16 (Const16 [c]) x) (Less16 x (Const16 [d]))) - // cond: c >= d - // result: (Less16U (Const16 [c-d]) (Sub16 x (Const16 
[d]))) + // match: (Or64 left:(Lsh64x8 x y) right:(Rsh64Ux8 x (Sub8 (Const8 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLess16 { - continue - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { + left := v_0 + if left.Op != OpLsh64x8 { continue } - c := auxIntToInt16(v_0_0.AuxInt) - if v_1.Op != OpLess16 { + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux8 { continue } - _ = v_1.Args[1] - if x != v_1.Args[0] { + _ = right.Args[1] + if x != right.Args[0] { continue } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { + right_1 := right.Args[1] + if right_1.Op != OpSub8 { continue } - d := auxIntToInt16(v_1_1.AuxInt) - if !(c >= d) { + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { continue } - v.reset(OpLess16U) - v0 := b.NewValue0(v.Pos, OpConst16, x.Type) - v0.AuxInt = int16ToAuxInt(c - d) - v1 := b.NewValue0(v.Pos, OpSub16, x.Type) - v2 := b.NewValue0(v.Pos, OpConst16, x.Type) - v2.AuxInt = int16ToAuxInt(d) - v1.AddArg2(x, v2) - v.AddArg2(v0, v1) + v.reset(OpRotateLeft64) + v.AddArg2(x, y) return true } break } - // match: (OrB (Leq16 (Const16 [c]) x) (Less16 x (Const16 [d]))) - // cond: c >= d - // result: (Leq16U (Const16 [c-d]) (Sub16 x (Const16 [d]))) + // match: (Or64 right:(Rsh64Ux64 x y) left:(Lsh64x64 x z:(Sub64 (Const64 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLeq16 { - continue - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { + right := v_0 + if right.Op != OpRsh64Ux64 { continue } - c := auxIntToInt16(v_0_0.AuxInt) - if v_1.Op != OpLess16 { + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x64 { continue } - _ = v_1.Args[1] - if x != v_1.Args[0] { + _ = left.Args[1] + if x != left.Args[0] { continue } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { + z := left.Args[1] + if z.Op != OpSub64 { continue } - d := auxIntToInt16(v_1_1.AuxInt) - if !(c >= d) { + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { continue } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpConst16, x.Type) - v0.AuxInt = int16ToAuxInt(c - d) - v1 := b.NewValue0(v.Pos, OpSub16, x.Type) - v2 := b.NewValue0(v.Pos, OpConst16, x.Type) - v2.AuxInt = int16ToAuxInt(d) - v1.AddArg2(x, v2) - v.AddArg2(v0, v1) + v.reset(OpRotateLeft64) + v.AddArg2(x, z) return true } break } - // match: (OrB (Less8 (Const8 [c]) x) (Less8 x (Const8 [d]))) - // cond: c >= d - // result: (Less8U (Const8 [c-d]) (Sub8 x (Const8 [d]))) + // match: (Or64 right:(Rsh64Ux32 x y) left:(Lsh64x32 x z:(Sub32 (Const32 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLess8 { - continue - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { + right := v_0 + if right.Op != OpRsh64Ux32 { continue } - c := 
auxIntToInt8(v_0_0.AuxInt) - if v_1.Op != OpLess8 { + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x32 { continue } - _ = v_1.Args[1] - if x != v_1.Args[0] { + _ = left.Args[1] + if x != left.Args[0] { continue } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 { + z := left.Args[1] + if z.Op != OpSub32 { continue } - d := auxIntToInt8(v_1_1.AuxInt) - if !(c >= d) { + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { continue } - v.reset(OpLess8U) - v0 := b.NewValue0(v.Pos, OpConst8, x.Type) - v0.AuxInt = int8ToAuxInt(c - d) - v1 := b.NewValue0(v.Pos, OpSub8, x.Type) - v2 := b.NewValue0(v.Pos, OpConst8, x.Type) - v2.AuxInt = int8ToAuxInt(d) - v1.AddArg2(x, v2) - v.AddArg2(v0, v1) + v.reset(OpRotateLeft64) + v.AddArg2(x, z) return true } break } - // match: (OrB (Leq8 (Const8 [c]) x) (Less8 x (Const8 [d]))) - // cond: c >= d - // result: (Leq8U (Const8 [c-d]) (Sub8 x (Const8 [d]))) + // match: (Or64 right:(Rsh64Ux16 x y) left:(Lsh64x16 x z:(Sub16 (Const16 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLeq8 { - continue - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { + right := v_0 + if right.Op != OpRsh64Ux16 { continue } - c := auxIntToInt8(v_0_0.AuxInt) - if v_1.Op != OpLess8 { + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x16 { continue } - _ = v_1.Args[1] - if x != v_1.Args[0] { + _ = left.Args[1] + if x != left.Args[0] { continue } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 { + z := left.Args[1] + if z.Op != OpSub16 { continue } - d := auxIntToInt8(v_1_1.AuxInt) - if !(c >= d) { + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { continue } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpConst8, x.Type) - v0.AuxInt = int8ToAuxInt(c - d) - v1 := b.NewValue0(v.Pos, OpSub8, x.Type) - v2 := b.NewValue0(v.Pos, OpConst8, x.Type) - v2.AuxInt = int8ToAuxInt(d) - v1.AddArg2(x, v2) - v.AddArg2(v0, v1) + v.reset(OpRotateLeft64) + v.AddArg2(x, z) return true } break } - // match: (OrB (Less64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) - // cond: c >= d+1 && d+1 > d - // result: (Less64U (Const64 [c-d-1]) (Sub64 x (Const64 [d+1]))) + // match: (Or64 right:(Rsh64Ux8 x y) left:(Lsh64x8 x z:(Sub8 (Const8 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLess64 { - continue - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { + right := v_0 + if right.Op != OpRsh64Ux8 { continue } - c := auxIntToInt64(v_0_0.AuxInt) - if v_1.Op != OpLeq64 { + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x8 { continue } - _ = v_1.Args[1] - if x != v_1.Args[0] { + _ = left.Args[1] + if x != left.Args[0] { continue } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { + z := left.Args[1] + if z.Op != OpSub8 { continue } - d := auxIntToInt64(v_1_1.AuxInt) - if !(c >= d+1 && d+1 > d) { + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 64 || y != 
z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { continue } - v.reset(OpLess64U) - v0 := b.NewValue0(v.Pos, OpConst64, x.Type) - v0.AuxInt = int64ToAuxInt(c - d - 1) - v1 := b.NewValue0(v.Pos, OpSub64, x.Type) - v2 := b.NewValue0(v.Pos, OpConst64, x.Type) - v2.AuxInt = int64ToAuxInt(d + 1) - v1.AddArg2(x, v2) - v.AddArg2(v0, v1) + v.reset(OpRotateLeft64) + v.AddArg2(x, z) return true } break } - // match: (OrB (Leq64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) - // cond: c >= d+1 && d+1 > d - // result: (Leq64U (Const64 [c-d-1]) (Sub64 x (Const64 [d+1]))) + return false +} +func rewriteValuegeneric_OpOr8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Or8 (Const8 [c]) (Const8 [d])) + // result: (Const8 [c|d]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLeq64 { - continue - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - continue - } - c := auxIntToInt64(v_0_0.AuxInt) - if v_1.Op != OpLeq64 { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { + if v_0.Op != OpConst8 { continue } - d := auxIntToInt64(v_1_1.AuxInt) - if !(c >= d+1 && d+1 > d) { + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpConst8 { continue } - v.reset(OpLeq64U) - v0 := b.NewValue0(v.Pos, OpConst64, x.Type) - v0.AuxInt = int64ToAuxInt(c - d - 1) - v1 := b.NewValue0(v.Pos, OpSub64, x.Type) - v2 := b.NewValue0(v.Pos, OpConst64, x.Type) - v2.AuxInt = int64ToAuxInt(d + 1) - v1.AddArg2(x, v2) - v.AddArg2(v0, v1) + d := auxIntToInt8(v_1.AuxInt) + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(c | d) return true } break } - // match: (OrB (Less32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) - // cond: c >= d+1 && d+1 > d - // result: (Less32U (Const32 [c-d-1]) (Sub32 x (Const32 [d+1]))) + // match: (Or8 x x) + // result: x for { - for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLess32 { - continue - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - continue - } - c := auxIntToInt32(v_0_0.AuxInt) - if v_1.Op != OpLeq32 { - continue - } - _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + // match: (Or8 (Const8 [0]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 { continue } - d := auxIntToInt32(v_1_1.AuxInt) - if !(c >= d+1 && d+1 > d) { + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (Or8 (Const8 [-1]) _) + // result: (Const8 [-1]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 { continue } - v.reset(OpLess32U) - v0 := b.NewValue0(v.Pos, OpConst32, x.Type) - v0.AuxInt = int32ToAuxInt(c - d - 1) - v1 := b.NewValue0(v.Pos, OpSub32, x.Type) - v2 := b.NewValue0(v.Pos, OpConst32, x.Type) - v2.AuxInt = int32ToAuxInt(d + 1) - v1.AddArg2(x, v2) - v.AddArg2(v0, v1) + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(-1) return true } break } - // match: (OrB (Leq32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) - // cond: c >= d+1 && d+1 > d - // result: (Leq32U (Const32 [c-d-1]) (Sub32 x (Const32 [d+1]))) + // match: (Or8 (Com8 x) x) + // result: (Const8 [-1]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLeq32 { + if 
v_0.Op != OpCom8 { continue } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { + x := v_0.Args[0] + if x != v_1 { continue } - c := auxIntToInt32(v_0_0.AuxInt) - if v_1.Op != OpLeq32 { + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(-1) + return true + } + break + } + // match: (Or8 x (Or8 x y)) + // result: (Or8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpOr8 { continue } _ = v_1.Args[1] - if x != v_1.Args[0] { - continue - } + v_1_0 := v_1.Args[0] v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - continue + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.reset(OpOr8) + v.AddArg2(x, y) + return true } - d := auxIntToInt32(v_1_1.AuxInt) - if !(c >= d+1 && d+1 > d) { + } + break + } + // match: (Or8 (And8 x (Const8 [c2])) (Const8 [c1])) + // cond: ^(c1 | c2) == 0 + // result: (Or8 (Const8 [c1]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpAnd8 { continue } - v.reset(OpLeq32U) - v0 := b.NewValue0(v.Pos, OpConst32, x.Type) - v0.AuxInt = int32ToAuxInt(c - d - 1) - v1 := b.NewValue0(v.Pos, OpSub32, x.Type) - v2 := b.NewValue0(v.Pos, OpConst32, x.Type) - v2.AuxInt = int32ToAuxInt(d + 1) - v1.AddArg2(x, v2) - v.AddArg2(v0, v1) - return true + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpConst8 { + continue + } + c2 := auxIntToInt8(v_0_1.AuxInt) + if v_1.Op != OpConst8 { + continue + } + t := v_1.Type + c1 := auxIntToInt8(v_1.AuxInt) + if !(^(c1 | c2) == 0) { + continue + } + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(c1) + v.AddArg2(v0, x) + return true + } } break } - // match: (OrB (Less16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) - // cond: c >= d+1 && d+1 > d - // result: (Less16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1]))) + // match: (Or8 (Or8 i:(Const8 ) z) x) + // cond: (z.Op != OpConst8 && x.Op != OpConst8) + // result: (Or8 i (Or8 z x)) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLess16 { + if v_0.Op != OpOr8 { continue } - x := v_0.Args[1] + _ = v_0.Args[1] v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - continue + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst8 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst8 && x.Op != OpConst8) { + continue + } + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpOr8, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true } - c := auxIntToInt16(v_0_0.AuxInt) - if v_1.Op != OpLeq16 { + } + break + } + // match: (Or8 (Const8 [c]) (Or8 (Const8 [d]) x)) + // result: (Or8 (Const8 [c|d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst8 { continue } - _ = v_1.Args[1] - if x != v_1.Args[0] { + t := v_0.Type + c := auxIntToInt8(v_0.AuxInt) + if v_1.Op != OpOr8 { continue } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - continue - } - d := auxIntToInt16(v_1_1.AuxInt) - if !(c >= d+1 && d+1 > d) { - continue + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst8 || v_1_0.Type != t { + continue + } + d := auxIntToInt8(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(c | d) + v.AddArg2(v0, x) + 
return true } - v.reset(OpLess16U) - v0 := b.NewValue0(v.Pos, OpConst16, x.Type) - v0.AuxInt = int16ToAuxInt(c - d - 1) - v1 := b.NewValue0(v.Pos, OpSub16, x.Type) - v2 := b.NewValue0(v.Pos, OpConst16, x.Type) - v2.AuxInt = int16ToAuxInt(d + 1) - v1.AddArg2(x, v2) - v.AddArg2(v0, v1) - return true } break } - // match: (OrB (Leq16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) - // cond: c >= d+1 && d+1 > d - // result: (Leq16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1]))) + // match: (Or8 (Lsh8x64 x z:(Const64 [c])) (Rsh8Ux64 x (Const64 [d]))) + // cond: c < 8 && d == 8-c && canRotate(config, 8) + // result: (RotateLeft8 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLeq16 { + if v_0.Op != OpLsh8x64 { continue } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { continue } - c := auxIntToInt16(v_0_0.AuxInt) - if v_1.Op != OpLeq16 { + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh8Ux64 { continue } _ = v_1.Args[1] @@ -18441,113 +20067,303 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { + if v_1_1.Op != OpConst64 { continue } - d := auxIntToInt16(v_1_1.AuxInt) - if !(c >= d+1 && d+1 > d) { + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 8 && d == 8-c && canRotate(config, 8)) { continue } - v.reset(OpLeq16U) - v0 := b.NewValue0(v.Pos, OpConst16, x.Type) - v0.AuxInt = int16ToAuxInt(c - d - 1) - v1 := b.NewValue0(v.Pos, OpSub16, x.Type) - v2 := b.NewValue0(v.Pos, OpConst16, x.Type) - v2.AuxInt = int16ToAuxInt(d + 1) - v1.AddArg2(x, v2) - v.AddArg2(v0, v1) + v.reset(OpRotateLeft8) + v.AddArg2(x, z) return true } break } - // match: (OrB (Less8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) - // cond: c >= d+1 && d+1 > d - // result: (Less8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1]))) + // match: (Or8 left:(Lsh8x64 x y) right:(Rsh8Ux64 x (Sub64 (Const64 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLess8 { - continue - } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { + left := v_0 + if left.Op != OpLsh8x64 { continue } - c := auxIntToInt8(v_0_0.AuxInt) - if v_1.Op != OpLeq8 { + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux64 { continue } - _ = v_1.Args[1] - if x != v_1.Args[0] { + _ = right.Args[1] + if x != right.Args[0] { continue } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 { + right_1 := right.Args[1] + if right_1.Op != OpSub64 { continue } - d := auxIntToInt8(v_1_1.AuxInt) - if !(c >= d+1 && d+1 > d) { + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { continue } - v.reset(OpLess8U) - v0 := b.NewValue0(v.Pos, OpConst8, x.Type) - v0.AuxInt = int8ToAuxInt(c - d - 1) - v1 := b.NewValue0(v.Pos, OpSub8, x.Type) - v2 := b.NewValue0(v.Pos, OpConst8, x.Type) - v2.AuxInt = int8ToAuxInt(d + 1) - v1.AddArg2(x, v2) - v.AddArg2(v0, v1) + v.reset(OpRotateLeft8) + v.AddArg2(x, y) return true } break } - // match: (OrB (Leq8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) - // cond: c >= d+1 && d+1 > d - // result: (Leq8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1]))) + // match: (Or8 left:(Lsh8x32 x y) right:(Rsh8Ux32 x (Sub32 (Const32 [8]) 
y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLeq8 { + left := v_0 + if left.Op != OpLsh8x32 { continue } - x := v_0.Args[1] - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux32 { continue } - c := auxIntToInt8(v_0_0.AuxInt) - if v_1.Op != OpLeq8 { + _ = right.Args[1] + if x != right.Args[0] { continue } - _ = v_1.Args[1] - if x != v_1.Args[0] { + right_1 := right.Args[1] + if right_1.Op != OpSub32 { continue } - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 { + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { continue } - d := auxIntToInt8(v_1_1.AuxInt) - if !(c >= d+1 && d+1 > d) { + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or8 left:(Lsh8x16 x y) right:(Rsh8Ux16 x (Sub16 (Const16 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x16 { continue } - v.reset(OpLeq8U) - v0 := b.NewValue0(v.Pos, OpConst8, x.Type) - v0.AuxInt = int8ToAuxInt(c - d - 1) - v1 := b.NewValue0(v.Pos, OpSub8, x.Type) - v2 := b.NewValue0(v.Pos, OpConst8, x.Type) - v2.AuxInt = int8ToAuxInt(d + 1) - v1.AddArg2(x, v2) - v.AddArg2(v0, v1) + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) return true } break } - // match: (OrB (Less64U (Const64 [c]) x) (Less64U x (Const64 [d]))) - // cond: uint64(c) >= uint64(d) + // match: (Or8 left:(Lsh8x8 x y) right:(Rsh8Ux8 x (Sub8 (Const8 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Or8 right:(Rsh8Ux64 x y) left:(Lsh8x64 x z:(Sub64 (Const64 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != 
OpLsh8x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or8 right:(Rsh8Ux32 x y) left:(Lsh8x32 x z:(Sub32 (Const32 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or8 right:(Rsh8Ux16 x y) left:(Lsh8x16 x z:(Sub16 (Const16 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux16 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Or8 right:(Rsh8Ux8 x y) left:(Lsh8x8 x z:(Sub8 (Const8 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux8 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x8 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpOrB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (OrB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d]))) + // cond: c >= d // result: (Less64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLess64U { + if v_0.Op != OpLess64 { continue } x := v_0.Args[1] @@ -18556,7 +20372,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } c := auxIntToInt64(v_0_0.AuxInt) - if v_1.Op != OpLess64U { + if v_1.Op != OpLess64 { continue } _ = v_1.Args[1] @@ -18568,7 +20384,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { 
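// ---------------------------------------------------------------------------
// Editorial note (illustrative sketch, not part of this CL or of the generated
// rewritegeneric.go): the generic Or64/Or8 rules in the hunks above are the
// arch-independent rotate detection added here, guarded by canRotate(config,
// width). Assuming a target where canRotate reports true, they recognize Go
// source along these lines and canonicalize it to RotateLeft*:

// Constant rotate: (Or64 (Lsh64x64 x (Const64 [3])) (Rsh64Ux64 x (Const64 [61])))
// with 3 < 64 and 61 == 64-3, rewritten to (RotateLeft64 x (Const64 [3])).
func rotConst(x uint64) uint64 {
	return x<<3 | x>>61
}

// Variable rotate: masking the count keeps both shift amounts below 64, so
// shiftIsBounded should hold on the left shift and
//   (Or64 left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y)))
// can be rewritten to (RotateLeft64 x y).
func rotVar(x uint64, k uint) uint64 {
	k &= 63
	return x<<k | x>>(64-k)
}
// ---------------------------------------------------------------------------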
continue } d := auxIntToInt64(v_1_1.AuxInt) - if !(uint64(c) >= uint64(d)) { + if !(c >= d) { continue } v.reset(OpLess64U) @@ -18583,12 +20399,12 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } - // match: (OrB (Leq64U (Const64 [c]) x) (Less64U x (Const64 [d]))) - // cond: uint64(c) >= uint64(d) + // match: (OrB (Leq64 (Const64 [c]) x) (Less64 x (Const64 [d]))) + // cond: c >= d // result: (Leq64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLeq64U { + if v_0.Op != OpLeq64 { continue } x := v_0.Args[1] @@ -18597,7 +20413,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } c := auxIntToInt64(v_0_0.AuxInt) - if v_1.Op != OpLess64U { + if v_1.Op != OpLess64 { continue } _ = v_1.Args[1] @@ -18609,7 +20425,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } d := auxIntToInt64(v_1_1.AuxInt) - if !(uint64(c) >= uint64(d)) { + if !(c >= d) { continue } v.reset(OpLeq64U) @@ -18624,12 +20440,12 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } - // match: (OrB (Less32U (Const32 [c]) x) (Less32U x (Const32 [d]))) - // cond: uint32(c) >= uint32(d) + // match: (OrB (Less32 (Const32 [c]) x) (Less32 x (Const32 [d]))) + // cond: c >= d // result: (Less32U (Const32 [c-d]) (Sub32 x (Const32 [d]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLess32U { + if v_0.Op != OpLess32 { continue } x := v_0.Args[1] @@ -18638,7 +20454,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } c := auxIntToInt32(v_0_0.AuxInt) - if v_1.Op != OpLess32U { + if v_1.Op != OpLess32 { continue } _ = v_1.Args[1] @@ -18650,7 +20466,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } d := auxIntToInt32(v_1_1.AuxInt) - if !(uint32(c) >= uint32(d)) { + if !(c >= d) { continue } v.reset(OpLess32U) @@ -18665,12 +20481,12 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } - // match: (OrB (Leq32U (Const32 [c]) x) (Less32U x (Const32 [d]))) - // cond: uint32(c) >= uint32(d) + // match: (OrB (Leq32 (Const32 [c]) x) (Less32 x (Const32 [d]))) + // cond: c >= d // result: (Leq32U (Const32 [c-d]) (Sub32 x (Const32 [d]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLeq32U { + if v_0.Op != OpLeq32 { continue } x := v_0.Args[1] @@ -18679,7 +20495,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } c := auxIntToInt32(v_0_0.AuxInt) - if v_1.Op != OpLess32U { + if v_1.Op != OpLess32 { continue } _ = v_1.Args[1] @@ -18691,7 +20507,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } d := auxIntToInt32(v_1_1.AuxInt) - if !(uint32(c) >= uint32(d)) { + if !(c >= d) { continue } v.reset(OpLeq32U) @@ -18706,12 +20522,12 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } - // match: (OrB (Less16U (Const16 [c]) x) (Less16U x (Const16 [d]))) - // cond: uint16(c) >= uint16(d) + // match: (OrB (Less16 (Const16 [c]) x) (Less16 x (Const16 [d]))) + // cond: c >= d // result: (Less16U (Const16 [c-d]) (Sub16 x (Const16 [d]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLess16U { + if v_0.Op != OpLess16 { continue } x := v_0.Args[1] @@ -18720,7 +20536,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } c := auxIntToInt16(v_0_0.AuxInt) - if v_1.Op != OpLess16U { + if v_1.Op != OpLess16 { continue } _ = v_1.Args[1] @@ -18732,7 +20548,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } d := auxIntToInt16(v_1_1.AuxInt) - if !(uint16(c) >= uint16(d)) { + 
if !(c >= d) { continue } v.reset(OpLess16U) @@ -18747,12 +20563,12 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } - // match: (OrB (Leq16U (Const16 [c]) x) (Less16U x (Const16 [d]))) - // cond: uint16(c) >= uint16(d) + // match: (OrB (Leq16 (Const16 [c]) x) (Less16 x (Const16 [d]))) + // cond: c >= d // result: (Leq16U (Const16 [c-d]) (Sub16 x (Const16 [d]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLeq16U { + if v_0.Op != OpLeq16 { continue } x := v_0.Args[1] @@ -18761,7 +20577,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } c := auxIntToInt16(v_0_0.AuxInt) - if v_1.Op != OpLess16U { + if v_1.Op != OpLess16 { continue } _ = v_1.Args[1] @@ -18773,7 +20589,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } d := auxIntToInt16(v_1_1.AuxInt) - if !(uint16(c) >= uint16(d)) { + if !(c >= d) { continue } v.reset(OpLeq16U) @@ -18788,12 +20604,12 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } - // match: (OrB (Less8U (Const8 [c]) x) (Less8U x (Const8 [d]))) - // cond: uint8(c) >= uint8(d) + // match: (OrB (Less8 (Const8 [c]) x) (Less8 x (Const8 [d]))) + // cond: c >= d // result: (Less8U (Const8 [c-d]) (Sub8 x (Const8 [d]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLess8U { + if v_0.Op != OpLess8 { continue } x := v_0.Args[1] @@ -18802,7 +20618,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } c := auxIntToInt8(v_0_0.AuxInt) - if v_1.Op != OpLess8U { + if v_1.Op != OpLess8 { continue } _ = v_1.Args[1] @@ -18814,7 +20630,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } d := auxIntToInt8(v_1_1.AuxInt) - if !(uint8(c) >= uint8(d)) { + if !(c >= d) { continue } v.reset(OpLess8U) @@ -18829,12 +20645,12 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } - // match: (OrB (Leq8U (Const8 [c]) x) (Less8U x (Const8 [d]))) - // cond: uint8(c) >= uint8(d) + // match: (OrB (Leq8 (Const8 [c]) x) (Less8 x (Const8 [d]))) + // cond: c >= d // result: (Leq8U (Const8 [c-d]) (Sub8 x (Const8 [d]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLeq8U { + if v_0.Op != OpLeq8 { continue } x := v_0.Args[1] @@ -18843,7 +20659,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } c := auxIntToInt8(v_0_0.AuxInt) - if v_1.Op != OpLess8U { + if v_1.Op != OpLess8 { continue } _ = v_1.Args[1] @@ -18855,7 +20671,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } d := auxIntToInt8(v_1_1.AuxInt) - if !(uint8(c) >= uint8(d)) { + if !(c >= d) { continue } v.reset(OpLeq8U) @@ -18870,12 +20686,12 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } - // match: (OrB (Less64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) - // cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d) + // match: (OrB (Less64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) + // cond: c >= d+1 && d+1 > d // result: (Less64U (Const64 [c-d-1]) (Sub64 x (Const64 [d+1]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLess64U { + if v_0.Op != OpLess64 { continue } x := v_0.Args[1] @@ -18884,7 +20700,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } c := auxIntToInt64(v_0_0.AuxInt) - if v_1.Op != OpLeq64U { + if v_1.Op != OpLeq64 { continue } _ = v_1.Args[1] @@ -18896,7 +20712,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } d := auxIntToInt64(v_1_1.AuxInt) - if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) { + if !(c >= d+1 && d+1 > d) { continue } 
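// ---------------------------------------------------------------------------
// Editorial note (illustrative sketch, not part of this CL): the OrB rules in
// this function fuse a pair of range checks into one unsigned comparison.
// For constants d <= c, "x < d || c < x" (x outside [d, c]) matches
//   (OrB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d]))) && c >= d
//     => (Less64U (Const64 [c-d]) (Sub64 x (Const64 [d])))
// When the short-circuit || below is turned into a single OrB, e.g.

func outsideRange(x int64) bool {
	return x < 10 || x > 100 // c = 100, d = 10
}

// it should end up equivalent to the single unsigned test

func outsideRangeFused(x int64) bool {
	return uint64(x-10) > uint64(100-10) // x-10 wraps around for x < 10
}
// ---------------------------------------------------------------------------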
v.reset(OpLess64U) @@ -18911,12 +20727,12 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } - // match: (OrB (Leq64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) - // cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d) + // match: (OrB (Leq64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) + // cond: c >= d+1 && d+1 > d // result: (Leq64U (Const64 [c-d-1]) (Sub64 x (Const64 [d+1]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLeq64U { + if v_0.Op != OpLeq64 { continue } x := v_0.Args[1] @@ -18925,7 +20741,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } c := auxIntToInt64(v_0_0.AuxInt) - if v_1.Op != OpLeq64U { + if v_1.Op != OpLeq64 { continue } _ = v_1.Args[1] @@ -18937,7 +20753,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } d := auxIntToInt64(v_1_1.AuxInt) - if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) { + if !(c >= d+1 && d+1 > d) { continue } v.reset(OpLeq64U) @@ -18952,12 +20768,12 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } - // match: (OrB (Less32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) - // cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d) + // match: (OrB (Less32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) + // cond: c >= d+1 && d+1 > d // result: (Less32U (Const32 [c-d-1]) (Sub32 x (Const32 [d+1]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLess32U { + if v_0.Op != OpLess32 { continue } x := v_0.Args[1] @@ -18966,7 +20782,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } c := auxIntToInt32(v_0_0.AuxInt) - if v_1.Op != OpLeq32U { + if v_1.Op != OpLeq32 { continue } _ = v_1.Args[1] @@ -18978,7 +20794,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } d := auxIntToInt32(v_1_1.AuxInt) - if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) { + if !(c >= d+1 && d+1 > d) { continue } v.reset(OpLess32U) @@ -18993,12 +20809,12 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } - // match: (OrB (Leq32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) - // cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d) + // match: (OrB (Leq32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) + // cond: c >= d+1 && d+1 > d // result: (Leq32U (Const32 [c-d-1]) (Sub32 x (Const32 [d+1]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLeq32U { + if v_0.Op != OpLeq32 { continue } x := v_0.Args[1] @@ -19007,7 +20823,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } c := auxIntToInt32(v_0_0.AuxInt) - if v_1.Op != OpLeq32U { + if v_1.Op != OpLeq32 { continue } _ = v_1.Args[1] @@ -19019,7 +20835,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } d := auxIntToInt32(v_1_1.AuxInt) - if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) { + if !(c >= d+1 && d+1 > d) { continue } v.reset(OpLeq32U) @@ -19034,12 +20850,12 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } - // match: (OrB (Less16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) - // cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) + // match: (OrB (Less16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) + // cond: c >= d+1 && d+1 > d // result: (Less16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLess16U { + if v_0.Op != OpLess16 { continue } x := v_0.Args[1] @@ -19048,7 +20864,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } c := auxIntToInt16(v_0_0.AuxInt) - if v_1.Op != OpLeq16U { + if v_1.Op 
!= OpLeq16 { continue } _ = v_1.Args[1] @@ -19060,7 +20876,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } d := auxIntToInt16(v_1_1.AuxInt) - if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) { + if !(c >= d+1 && d+1 > d) { continue } v.reset(OpLess16U) @@ -19075,12 +20891,12 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } - // match: (OrB (Leq16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) - // cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) + // match: (OrB (Leq16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) + // cond: c >= d+1 && d+1 > d // result: (Leq16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLeq16U { + if v_0.Op != OpLeq16 { continue } x := v_0.Args[1] @@ -19089,7 +20905,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } c := auxIntToInt16(v_0_0.AuxInt) - if v_1.Op != OpLeq16U { + if v_1.Op != OpLeq16 { continue } _ = v_1.Args[1] @@ -19101,7 +20917,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } d := auxIntToInt16(v_1_1.AuxInt) - if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) { + if !(c >= d+1 && d+1 > d) { continue } v.reset(OpLeq16U) @@ -19116,12 +20932,12 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } - // match: (OrB (Less8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) - // cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) + // match: (OrB (Less8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) + // cond: c >= d+1 && d+1 > d // result: (Less8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLess8U { + if v_0.Op != OpLess8 { continue } x := v_0.Args[1] @@ -19130,7 +20946,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } c := auxIntToInt8(v_0_0.AuxInt) - if v_1.Op != OpLeq8U { + if v_1.Op != OpLeq8 { continue } _ = v_1.Args[1] @@ -19142,7 +20958,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } d := auxIntToInt8(v_1_1.AuxInt) - if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) { + if !(c >= d+1 && d+1 > d) { continue } v.reset(OpLess8U) @@ -19157,12 +20973,12 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } - // match: (OrB (Leq8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) - // cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) + // match: (OrB (Leq8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) + // cond: c >= d+1 && d+1 > d // result: (Leq8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1]))) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpLeq8U { + if v_0.Op != OpLeq8 { continue } x := v_0.Args[1] @@ -19171,7 +20987,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } c := auxIntToInt8(v_0_0.AuxInt) - if v_1.Op != OpLeq8U { + if v_1.Op != OpLeq8 { continue } _ = v_1.Args[1] @@ -19183,7 +20999,7 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { continue } d := auxIntToInt8(v_1_1.AuxInt) - if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) { + if !(c >= d+1 && d+1 > d) { continue } v.reset(OpLeq8U) @@ -19198,211 +21014,2663 @@ func rewriteValuegeneric_OpOrB(v *Value) bool { } break } - return false -} -func rewriteValuegeneric_OpPhi(v *Value) bool { - // match: (Phi (Const8 [c]) (Const8 [c])) - // result: (Const8 [c]) + // match: (OrB (Less64U (Const64 [c]) x) (Less64U x (Const64 [d]))) + // cond: uint64(c) >= uint64(d) + // result: (Less64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) for { - if len(v.Args) != 2 { - break - } - _ = v.Args[1] - v_0 := v.Args[0] - if 
v_0.Op != OpConst8 { - break - } - c := auxIntToInt8(v_0.AuxInt) - v_1 := v.Args[1] - if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != c { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLess64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(uint64(c) >= uint64(d)) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = int64ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true } - v.reset(OpConst8) - v.AuxInt = int8ToAuxInt(c) - return true + break } - // match: (Phi (Const16 [c]) (Const16 [c])) - // result: (Const16 [c]) + // match: (OrB (Leq64U (Const64 [c]) x) (Less64U x (Const64 [d]))) + // cond: uint64(c) >= uint64(d) + // result: (Leq64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) for { - if len(v.Args) != 2 { - break - } - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - c := auxIntToInt16(v_0.AuxInt) - v_1 := v.Args[1] - if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != c { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLess64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(uint64(c) >= uint64(d)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = int64ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true } - v.reset(OpConst16) - v.AuxInt = int16ToAuxInt(c) - return true + break } - // match: (Phi (Const32 [c]) (Const32 [c])) - // result: (Const32 [c]) + // match: (OrB (Less32U (Const32 [c]) x) (Less32U x (Const32 [d]))) + // cond: uint32(c) >= uint32(d) + // result: (Less32U (Const32 [c-d]) (Sub32 x (Const32 [d]))) for { - if len(v.Args) != 2 { - break - } - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - c := auxIntToInt32(v_0.AuxInt) - v_1 := v.Args[1] - if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != c { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLess32U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(uint32(c) >= uint32(d)) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = int32ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true } - v.reset(OpConst32) - v.AuxInt = int32ToAuxInt(c) - 
return true + break } - // match: (Phi (Const64 [c]) (Const64 [c])) - // result: (Const64 [c]) + // match: (OrB (Leq32U (Const32 [c]) x) (Less32U x (Const32 [d]))) + // cond: uint32(c) >= uint32(d) + // result: (Leq32U (Const32 [c-d]) (Sub32 x (Const32 [d]))) for { - if len(v.Args) != 2 { - break - } - _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - c := auxIntToInt64(v_0.AuxInt) - v_1 := v.Args[1] - if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLess32U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(uint32(c) >= uint32(d)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = int32ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true } - v.reset(OpConst64) - v.AuxInt = int64ToAuxInt(c) - return true + break } - return false -} -func rewriteValuegeneric_OpPtrIndex(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - config := b.Func.Config + // match: (OrB (Less16U (Const16 [c]) x) (Less16U x (Const16 [d]))) + // cond: uint16(c) >= uint16(d) + // result: (Less16U (Const16 [c-d]) (Sub16 x (Const16 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLess16U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(uint16(c) >= uint16(d)) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = int16ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq16U (Const16 [c]) x) (Less16U x (Const16 [d]))) + // cond: uint16(c) >= uint16(d) + // result: (Leq16U (Const16 [c-d]) (Sub16 x (Const16 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLess16U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(uint16(c) >= uint16(d)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = int16ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less8U (Const8 [c]) x) (Less8U x (Const8 [d]))) + // cond: uint8(c) >= uint8(d) + // result: (Less8U (Const8 [c-d]) (Sub8 x (Const8 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = 
_i0+1, v_1, v_0 { + if v_0.Op != OpLess8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLess8U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(uint8(c) >= uint8(d)) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = int8ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq8U (Const8 [c]) x) (Less8U x (Const8 [d]))) + // cond: uint8(c) >= uint8(d) + // result: (Leq8U (Const8 [c-d]) (Sub8 x (Const8 [d]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLess8U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(uint8(c) >= uint8(d)) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = int8ToAuxInt(c - d) + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) + // cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d) + // result: (Less64U (Const64 [c-d-1]) (Sub64 x (Const64 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLeq64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) { + continue + } + v.reset(OpLess64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = int64ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = int64ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) + // cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d) + // result: (Leq64U (Const64 [c-d-1]) (Sub64 x (Const64 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq64U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_0_0.AuxInt) + if v_1.Op != OpLeq64U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) { + continue + } + v.reset(OpLeq64U) + v0 := b.NewValue0(v.Pos, OpConst64, x.Type) + v0.AuxInt = int64ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub64, x.Type) + v2 := b.NewValue0(v.Pos, OpConst64, x.Type) + v2.AuxInt = 
int64ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) + // cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d) + // result: (Less32U (Const32 [c-d-1]) (Sub32 x (Const32 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLeq32U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) { + continue + } + v.reset(OpLess32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = int32ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) + // cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d) + // result: (Leq32U (Const32 [c-d-1]) (Sub32 x (Const32 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq32U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0_0.AuxInt) + if v_1.Op != OpLeq32U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + continue + } + d := auxIntToInt32(v_1_1.AuxInt) + if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) { + continue + } + v.reset(OpLeq32U) + v0 := b.NewValue0(v.Pos, OpConst32, x.Type) + v0.AuxInt = int32ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub32, x.Type) + v2 := b.NewValue0(v.Pos, OpConst32, x.Type) + v2.AuxInt = int32ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) + // cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) + // result: (Less16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != OpLeq16U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) { + continue + } + v.reset(OpLess16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = int16ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) + // cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) + // result: (Leq16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq16U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0_0.AuxInt) + if v_1.Op != 
OpLeq16U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1_1.AuxInt) + if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) { + continue + } + v.reset(OpLeq16U) + v0 := b.NewValue0(v.Pos, OpConst16, x.Type) + v0.AuxInt = int16ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub16, x.Type) + v2 := b.NewValue0(v.Pos, OpConst16, x.Type) + v2.AuxInt = int16ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Less8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) + // cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) + // result: (Less8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLess8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLeq8U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) { + continue + } + v.reset(OpLess8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = int8ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + // match: (OrB (Leq8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) + // cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) + // result: (Leq8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1]))) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLeq8U { + continue + } + x := v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_0_0.AuxInt) + if v_1.Op != OpLeq8U { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst8 { + continue + } + d := auxIntToInt8(v_1_1.AuxInt) + if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) { + continue + } + v.reset(OpLeq8U) + v0 := b.NewValue0(v.Pos, OpConst8, x.Type) + v0.AuxInt = int8ToAuxInt(c - d - 1) + v1 := b.NewValue0(v.Pos, OpSub8, x.Type) + v2 := b.NewValue0(v.Pos, OpConst8, x.Type) + v2.AuxInt = int8ToAuxInt(d + 1) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpPhi(v *Value) bool { + // match: (Phi (Const8 [c]) (Const8 [c])) + // result: (Const8 [c]) + for { + if len(v.Args) != 2 { + break + } + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_0.AuxInt) + v_1 := v.Args[1] + if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != c { + break + } + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(c) + return true + } + // match: (Phi (Const16 [c]) (Const16 [c])) + // result: (Const16 [c]) + for { + if len(v.Args) != 2 { + break + } + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_0.AuxInt) + v_1 := v.Args[1] + if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != c { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(c) + return true + } + // match: (Phi (Const32 [c]) (Const32 [c])) + // result: (Const32 [c]) + for { + if len(v.Args) != 2 { + break + } + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst32 { + break + } + c := 
auxIntToInt32(v_0.AuxInt) + v_1 := v.Args[1] + if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != c { + break + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(c) + return true + } + // match: (Phi (Const64 [c]) (Const64 [c])) + // result: (Const64 [c]) + for { + if len(v.Args) != 2 { + break + } + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_1 := v.Args[1] + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c { + break + } + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(c) + return true + } + return false +} +func rewriteValuegeneric_OpPtrIndex(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config typ := &b.Func.Config.Types // match: (PtrIndex ptr idx) // cond: config.PtrSize == 4 && is32Bit(t.Elem().Size()) // result: (AddPtr ptr (Mul32 idx (Const32 [int32(t.Elem().Size())]))) for { - t := v.Type - ptr := v_0 - idx := v_1 - if !(config.PtrSize == 4 && is32Bit(t.Elem().Size())) { + t := v.Type + ptr := v_0 + idx := v_1 + if !(config.PtrSize == 4 && is32Bit(t.Elem().Size())) { + break + } + v.reset(OpAddPtr) + v0 := b.NewValue0(v.Pos, OpMul32, typ.Int) + v1 := b.NewValue0(v.Pos, OpConst32, typ.Int) + v1.AuxInt = int32ToAuxInt(int32(t.Elem().Size())) + v0.AddArg2(idx, v1) + v.AddArg2(ptr, v0) + return true + } + // match: (PtrIndex ptr idx) + // cond: config.PtrSize == 8 + // result: (AddPtr ptr (Mul64 idx (Const64 [t.Elem().Size()]))) + for { + t := v.Type + ptr := v_0 + idx := v_1 + if !(config.PtrSize == 8) { + break + } + v.reset(OpAddPtr) + v0 := b.NewValue0(v.Pos, OpMul64, typ.Int) + v1 := b.NewValue0(v.Pos, OpConst64, typ.Int) + v1.AuxInt = int64ToAuxInt(t.Elem().Size()) + v0.AddArg2(idx, v1) + v.AddArg2(ptr, v0) + return true + } + return false +} +func rewriteValuegeneric_OpRotateLeft16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (RotateLeft16 x (Const16 [c])) + // cond: c%16 == 0 + // result: x + for { + x := v_0 + if v_1.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1.AuxInt) + if !(c%16 == 0) { + break + } + v.copyOf(x) + return true + } + // match: (RotateLeft16 x (And64 y (Const64 [c]))) + // cond: c&15 == 15 + // result: (RotateLeft16 x y) + for { + x := v_0 + if v_1.Op != OpAnd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c&15 == 15) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft16 x (And32 y (Const32 [c]))) + // cond: c&15 == 15 + // result: (RotateLeft16 x y) + for { + x := v_0 + if v_1.Op != OpAnd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_1.AuxInt) + if !(c&15 == 15) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft16 x (And16 y (Const16 [c]))) + // cond: c&15 == 15 + // result: (RotateLeft16 x y) + for { + x := v_0 + if v_1.Op != OpAnd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_1.AuxInt) + 
if !(c&15 == 15) {
+			continue
+		}
+		v.reset(OpRotateLeft16)
+		v.AddArg2(x, y)
+		return true
+	}
+	break
+}
+// match: (RotateLeft16 x (And8 y (Const8 [c])))
+// cond: c&15 == 15
+// result: (RotateLeft16 x y)
+for {
+	x := v_0
+	if v_1.Op != OpAnd8 {
+		break
+	}
+	_ = v_1.Args[1]
+	v_1_0 := v_1.Args[0]
+	v_1_1 := v_1.Args[1]
+	for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+		y := v_1_0
+		if v_1_1.Op != OpConst8 {
+			continue
+		}
+		c := auxIntToInt8(v_1_1.AuxInt)
+		if !(c&15 == 15) {
+			continue
+		}
+		v.reset(OpRotateLeft16)
+		v.AddArg2(x, y)
+		return true
+	}
+	break
+}
+// match: (RotateLeft16 x (Neg64 (And64 y (Const64 [c]))))
+// cond: c&15 == 15
+// result: (RotateLeft16 x (Neg64 y))
+for {
+	x := v_0
+	if v_1.Op != OpNeg64 {
+		break
+	}
+	v_1_0 := v_1.Args[0]
+	if v_1_0.Op != OpAnd64 {
+		break
+	}
+	_ = v_1_0.Args[1]
+	v_1_0_0 := v_1_0.Args[0]
+	v_1_0_1 := v_1_0.Args[1]
+	for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+		y := v_1_0_0
+		if v_1_0_1.Op != OpConst64 {
+			continue
+		}
+		c := auxIntToInt64(v_1_0_1.AuxInt)
+		if !(c&15 == 15) {
+			continue
+		}
+		v.reset(OpRotateLeft16)
+		v0 := b.NewValue0(v.Pos, OpNeg64, y.Type)
+		v0.AddArg(y)
+		v.AddArg2(x, v0)
+		return true
+	}
+	break
+}
+// match: (RotateLeft16 x (Neg32 (And32 y (Const32 [c]))))
+// cond: c&15 == 15
+// result: (RotateLeft16 x (Neg32 y))
+for {
+	x := v_0
+	if v_1.Op != OpNeg32 {
+		break
+	}
+	v_1_0 := v_1.Args[0]
+	if v_1_0.Op != OpAnd32 {
+		break
+	}
+	_ = v_1_0.Args[1]
+	v_1_0_0 := v_1_0.Args[0]
+	v_1_0_1 := v_1_0.Args[1]
+	for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+		y := v_1_0_0
+		if v_1_0_1.Op != OpConst32 {
+			continue
+		}
+		c := auxIntToInt32(v_1_0_1.AuxInt)
+		if !(c&15 == 15) {
+			continue
+		}
+		v.reset(OpRotateLeft16)
+		v0 := b.NewValue0(v.Pos, OpNeg32, y.Type)
+		v0.AddArg(y)
+		v.AddArg2(x, v0)
+		return true
+	}
+	break
+}
+// match: (RotateLeft16 x (Neg16 (And16 y (Const16 [c]))))
+// cond: c&15 == 15
+// result: (RotateLeft16 x (Neg16 y))
+for {
+	x := v_0
+	if v_1.Op != OpNeg16 {
+		break
+	}
+	v_1_0 := v_1.Args[0]
+	if v_1_0.Op != OpAnd16 {
+		break
+	}
+	_ = v_1_0.Args[1]
+	v_1_0_0 := v_1_0.Args[0]
+	v_1_0_1 := v_1_0.Args[1]
+	for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+		y := v_1_0_0
+		if v_1_0_1.Op != OpConst16 {
+			continue
+		}
+		c := auxIntToInt16(v_1_0_1.AuxInt)
+		if !(c&15 == 15) {
+			continue
+		}
+		v.reset(OpRotateLeft16)
+		v0 := b.NewValue0(v.Pos, OpNeg16, y.Type)
+		v0.AddArg(y)
+		v.AddArg2(x, v0)
+		return true
+	}
+	break
+}
+// match: (RotateLeft16 x (Neg8 (And8 y (Const8 [c]))))
+// cond: c&15 == 15
+// result: (RotateLeft16 x (Neg8 y))
+for {
+	x := v_0
+	if v_1.Op != OpNeg8 {
+		break
+	}
+	v_1_0 := v_1.Args[0]
+	if v_1_0.Op != OpAnd8 {
+		break
+	}
+	_ = v_1_0.Args[1]
+	v_1_0_0 := v_1_0.Args[0]
+	v_1_0_1 := v_1_0.Args[1]
+	for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+		y := v_1_0_0
+		if v_1_0_1.Op != OpConst8 {
+			continue
+		}
+		c := auxIntToInt8(v_1_0_1.AuxInt)
+		if !(c&15 == 15) {
+			continue
+		}
+		v.reset(OpRotateLeft16)
+		v0 := b.NewValue0(v.Pos, OpNeg8, y.Type)
+		v0.AddArg(y)
+		v.AddArg2(x, v0)
+		return true
+	}
+	break
+}
+// match: (RotateLeft16 x (Add64 y (Const64 [c])))
+// cond: c&15 == 0
+// result: (RotateLeft16 x y)
+for {
+	x := v_0
+	if v_1.Op != OpAdd64 {
+		break
+	}
+	_ = v_1.Args[1]
+	v_1_0 := v_1.Args[0]
+	v_1_1 := v_1.Args[1]
+	for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+		y := v_1_0
+		if v_1_1.Op != 
OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c&15 == 0) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft16 x (Add32 y (Const32 [c]))) + // cond: c&15 == 0 + // result: (RotateLeft16 x y) + for { + x := v_0 + if v_1.Op != OpAdd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_1.AuxInt) + if !(c&15 == 0) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft16 x (Add16 y (Const16 [c]))) + // cond: c&15 == 0 + // result: (RotateLeft16 x y) + for { + x := v_0 + if v_1.Op != OpAdd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_1.AuxInt) + if !(c&15 == 0) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft16 x (Add8 y (Const8 [c]))) + // cond: c&15 == 0 + // result: (RotateLeft16 x y) + for { + x := v_0 + if v_1.Op != OpAdd8 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_1.AuxInt) + if !(c&15 == 0) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft16 x (Sub64 (Const64 [c]) y)) + // cond: c&15 == 0 + // result: (RotateLeft16 x (Neg64 y)) + for { + x := v_0 + if v_1.Op != OpSub64 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1_0.AuxInt) + if !(c&15 == 0) { + break + } + v.reset(OpRotateLeft16) + v0 := b.NewValue0(v.Pos, OpNeg64, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft16 x (Sub32 (Const32 [c]) y)) + // cond: c&15 == 0 + // result: (RotateLeft16 x (Neg32 y)) + for { + x := v_0 + if v_1.Op != OpSub32 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c&15 == 0) { + break + } + v.reset(OpRotateLeft16) + v0 := b.NewValue0(v.Pos, OpNeg32, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft16 x (Sub16 (Const16 [c]) y)) + // cond: c&15 == 0 + // result: (RotateLeft16 x (Neg16 y)) + for { + x := v_0 + if v_1.Op != OpSub16 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1_0.AuxInt) + if !(c&15 == 0) { + break + } + v.reset(OpRotateLeft16) + v0 := b.NewValue0(v.Pos, OpNeg16, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft16 x (Sub8 (Const8 [c]) y)) + // cond: c&15 == 0 + // result: (RotateLeft16 x (Neg8 y)) + for { + x := v_0 + if v_1.Op != OpSub8 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1_0.AuxInt) + if !(c&15 == 0) { + break + } + v.reset(OpRotateLeft16) + v0 := b.NewValue0(v.Pos, OpNeg8, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft16 x (Const64 [c])) + // cond: config.PtrSize == 4 + // result: (RotateLeft16 x (Const32 [int32(c)])) + for { + x := 
v_0 + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + c := auxIntToInt64(v_1.AuxInt) + if !(config.PtrSize == 4) { + break + } + v.reset(OpRotateLeft16) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpRotateLeft32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (RotateLeft32 x (Const32 [c])) + // cond: c%32 == 0 + // result: x + for { + x := v_0 + if v_1.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1.AuxInt) + if !(c%32 == 0) { + break + } + v.copyOf(x) + return true + } + // match: (RotateLeft32 x (And64 y (Const64 [c]))) + // cond: c&31 == 31 + // result: (RotateLeft32 x y) + for { + x := v_0 + if v_1.Op != OpAnd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c&31 == 31) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft32 x (And32 y (Const32 [c]))) + // cond: c&31 == 31 + // result: (RotateLeft32 x y) + for { + x := v_0 + if v_1.Op != OpAnd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_1.AuxInt) + if !(c&31 == 31) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft32 x (And16 y (Const16 [c]))) + // cond: c&31 == 31 + // result: (RotateLeft32 x y) + for { + x := v_0 + if v_1.Op != OpAnd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_1.AuxInt) + if !(c&31 == 31) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft32 x (And8 y (Const8 [c]))) + // cond: c&31 == 31 + // result: (RotateLeft32 x y) + for { + x := v_0 + if v_1.Op != OpAnd8 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_1.AuxInt) + if !(c&31 == 31) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft32 x (Neg64 (And64 y (Const64 [c])))) + // cond: c&31 == 31 + // result: (RotateLeft32 x (Neg64 y)) + for { + x := v_0 + if v_1.Op != OpNeg64 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd64 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_0_1.AuxInt) + if !(c&31 == 31) { + continue + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpNeg64, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft32 x (Neg32 (And32 y (Const32 [c])))) + // cond: c&31 == 31 + // result: (RotateLeft32 x (Neg32 y)) + for { + x := v_0 + if v_1.Op != OpNeg32 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd32 { + break + } + _ = 
v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0_1.AuxInt) + if !(c&31 == 31) { + continue + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpNeg32, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft32 x (Neg16 (And16 y (Const16 [c])))) + // cond: c&31 == 31 + // result: (RotateLeft32 x (Neg16 y)) + for { + x := v_0 + if v_1.Op != OpNeg16 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd16 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_0_1.AuxInt) + if !(c&31 == 31) { + continue + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpNeg16, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft32 x (Neg8 (And8 y (Const8 [c])))) + // cond: c&31 == 31 + // result: (RotateLeft32 x (Neg8 y)) + for { + x := v_0 + if v_1.Op != OpNeg8 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd8 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_0_1.AuxInt) + if !(c&31 == 31) { + continue + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpNeg8, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft32 x (Add64 y (Const64 [c]))) + // cond: c&31 == 0 + // result: (RotateLeft32 x y) + for { + x := v_0 + if v_1.Op != OpAdd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c&31 == 0) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft32 x (Add32 y (Const32 [c]))) + // cond: c&31 == 0 + // result: (RotateLeft32 x y) + for { + x := v_0 + if v_1.Op != OpAdd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_1.AuxInt) + if !(c&31 == 0) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft32 x (Add16 y (Const16 [c]))) + // cond: c&31 == 0 + // result: (RotateLeft32 x y) + for { + x := v_0 + if v_1.Op != OpAdd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_1.AuxInt) + if !(c&31 == 0) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft32 x (Add8 y (Const8 [c]))) + // cond: c&31 == 0 + // result: (RotateLeft32 x y) + for { + x := v_0 + if v_1.Op != OpAdd8 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst8 { + continue + } + 
c := auxIntToInt8(v_1_1.AuxInt) + if !(c&31 == 0) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft32 x (Sub64 (Const64 [c]) y)) + // cond: c&31 == 0 + // result: (RotateLeft32 x (Neg64 y)) + for { + x := v_0 + if v_1.Op != OpSub64 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1_0.AuxInt) + if !(c&31 == 0) { + break + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpNeg64, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft32 x (Sub32 (Const32 [c]) y)) + // cond: c&31 == 0 + // result: (RotateLeft32 x (Neg32 y)) + for { + x := v_0 + if v_1.Op != OpSub32 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c&31 == 0) { + break + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpNeg32, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft32 x (Sub16 (Const16 [c]) y)) + // cond: c&31 == 0 + // result: (RotateLeft32 x (Neg16 y)) + for { + x := v_0 + if v_1.Op != OpSub16 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1_0.AuxInt) + if !(c&31 == 0) { + break + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpNeg16, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft32 x (Sub8 (Const8 [c]) y)) + // cond: c&31 == 0 + // result: (RotateLeft32 x (Neg8 y)) + for { + x := v_0 + if v_1.Op != OpSub8 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1_0.AuxInt) + if !(c&31 == 0) { + break + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpNeg8, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft32 x (Const64 [c])) + // cond: config.PtrSize == 4 + // result: (RotateLeft32 x (Const32 [int32(c)])) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + c := auxIntToInt64(v_1.AuxInt) + if !(config.PtrSize == 4) { + break + } + v.reset(OpRotateLeft32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpRotateLeft64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (RotateLeft64 x (Const64 [c])) + // cond: c%64 == 0 + // result: x + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(c%64 == 0) { + break + } + v.copyOf(x) + return true + } + // match: (RotateLeft64 x (And64 y (Const64 [c]))) + // cond: c&63 == 63 + // result: (RotateLeft64 x y) + for { + x := v_0 + if v_1.Op != OpAnd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c&63 == 63) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft64 x (And32 y (Const32 [c]))) + // cond: c&63 == 63 + // result: (RotateLeft64 x y) + for { + x := v_0 + if v_1.Op != OpAnd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst32 { + continue + 
} + c := auxIntToInt32(v_1_1.AuxInt) + if !(c&63 == 63) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft64 x (And16 y (Const16 [c]))) + // cond: c&63 == 63 + // result: (RotateLeft64 x y) + for { + x := v_0 + if v_1.Op != OpAnd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_1.AuxInt) + if !(c&63 == 63) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft64 x (And8 y (Const8 [c]))) + // cond: c&63 == 63 + // result: (RotateLeft64 x y) + for { + x := v_0 + if v_1.Op != OpAnd8 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_1.AuxInt) + if !(c&63 == 63) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft64 x (Neg64 (And64 y (Const64 [c])))) + // cond: c&63 == 63 + // result: (RotateLeft64 x (Neg64 y)) + for { + x := v_0 + if v_1.Op != OpNeg64 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd64 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_0_1.AuxInt) + if !(c&63 == 63) { + continue + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpNeg64, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft64 x (Neg32 (And32 y (Const32 [c])))) + // cond: c&63 == 63 + // result: (RotateLeft64 x (Neg32 y)) + for { + x := v_0 + if v_1.Op != OpNeg32 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd32 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0_1.AuxInt) + if !(c&63 == 63) { + continue + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpNeg32, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft64 x (Neg16 (And16 y (Const16 [c])))) + // cond: c&63 == 63 + // result: (RotateLeft64 x (Neg16 y)) + for { + x := v_0 + if v_1.Op != OpNeg16 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd16 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_0_1.AuxInt) + if !(c&63 == 63) { + continue + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpNeg16, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft64 x (Neg8 (And8 y (Const8 [c])))) + // cond: c&63 == 63 + // result: (RotateLeft64 x (Neg8 y)) + for { + x := v_0 + if v_1.Op != OpNeg8 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd8 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst8 { + continue 
+ } + c := auxIntToInt8(v_1_0_1.AuxInt) + if !(c&63 == 63) { + continue + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpNeg8, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft64 x (Add64 y (Const64 [c]))) + // cond: c&63 == 0 + // result: (RotateLeft64 x y) + for { + x := v_0 + if v_1.Op != OpAdd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c&63 == 0) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft64 x (Add32 y (Const32 [c]))) + // cond: c&63 == 0 + // result: (RotateLeft64 x y) + for { + x := v_0 + if v_1.Op != OpAdd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_1.AuxInt) + if !(c&63 == 0) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft64 x (Add16 y (Const16 [c]))) + // cond: c&63 == 0 + // result: (RotateLeft64 x y) + for { + x := v_0 + if v_1.Op != OpAdd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_1.AuxInt) + if !(c&63 == 0) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft64 x (Add8 y (Const8 [c]))) + // cond: c&63 == 0 + // result: (RotateLeft64 x y) + for { + x := v_0 + if v_1.Op != OpAdd8 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_1.AuxInt) + if !(c&63 == 0) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft64 x (Sub64 (Const64 [c]) y)) + // cond: c&63 == 0 + // result: (RotateLeft64 x (Neg64 y)) + for { + x := v_0 + if v_1.Op != OpSub64 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1_0.AuxInt) + if !(c&63 == 0) { + break + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpNeg64, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft64 x (Sub32 (Const32 [c]) y)) + // cond: c&63 == 0 + // result: (RotateLeft64 x (Neg32 y)) + for { + x := v_0 + if v_1.Op != OpSub32 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_1_0.AuxInt) + if !(c&63 == 0) { + break + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpNeg32, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft64 x (Sub16 (Const16 [c]) y)) + // cond: c&63 == 0 + // result: (RotateLeft64 x (Neg16 y)) + for { + x := v_0 + if v_1.Op != OpSub16 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst16 { + break + } + c := auxIntToInt16(v_1_0.AuxInt) + if !(c&63 == 0) { + break + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpNeg16, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft64 x (Sub8 (Const8 
[c]) y)) + // cond: c&63 == 0 + // result: (RotateLeft64 x (Neg8 y)) + for { + x := v_0 + if v_1.Op != OpSub8 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1_0.AuxInt) + if !(c&63 == 0) { + break + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpNeg8, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + // match: (RotateLeft64 x (Const64 [c])) + // cond: config.PtrSize == 4 + // result: (RotateLeft64 x (Const32 [int32(c)])) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + c := auxIntToInt64(v_1.AuxInt) + if !(config.PtrSize == 4) { + break + } + v.reset(OpRotateLeft64) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(x, v0) + return true + } + return false +} +func rewriteValuegeneric_OpRotateLeft8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (RotateLeft8 x (Const8 [c])) + // cond: c%8 == 0 + // result: x + for { + x := v_0 + if v_1.Op != OpConst8 { + break + } + c := auxIntToInt8(v_1.AuxInt) + if !(c%8 == 0) { + break + } + v.copyOf(x) + return true + } + // match: (RotateLeft8 x (And64 y (Const64 [c]))) + // cond: c&7 == 7 + // result: (RotateLeft8 x y) + for { + x := v_0 + if v_1.Op != OpAnd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c&7 == 7) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft8 x (And32 y (Const32 [c]))) + // cond: c&7 == 7 + // result: (RotateLeft8 x y) + for { + x := v_0 + if v_1.Op != OpAnd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_1.AuxInt) + if !(c&7 == 7) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft8 x (And16 y (Const16 [c]))) + // cond: c&7 == 7 + // result: (RotateLeft8 x y) + for { + x := v_0 + if v_1.Op != OpAnd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_1.AuxInt) + if !(c&7 == 7) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft8 x (And8 y (Const8 [c]))) + // cond: c&7 == 7 + // result: (RotateLeft8 x y) + for { + x := v_0 + if v_1.Op != OpAnd8 { break } - v.reset(OpAddPtr) - v0 := b.NewValue0(v.Pos, OpMul32, typ.Int) - v1 := b.NewValue0(v.Pos, OpConst32, typ.Int) - v1.AuxInt = int32ToAuxInt(int32(t.Elem().Size())) - v0.AddArg2(idx, v1) - v.AddArg2(ptr, v0) - return true + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_1.AuxInt) + if !(c&7 == 7) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break } - // match: (PtrIndex ptr idx) - // cond: config.PtrSize == 8 - // result: (AddPtr ptr (Mul64 idx (Const64 [t.Elem().Size()]))) + // match: (RotateLeft8 x (Neg64 (And64 y (Const64 [c])))) + // 
cond: c&7 == 7 + // result: (RotateLeft8 x (Neg64 y)) for { - t := v.Type - ptr := v_0 - idx := v_1 - if !(config.PtrSize == 8) { + x := v_0 + if v_1.Op != OpNeg64 { break } - v.reset(OpAddPtr) - v0 := b.NewValue0(v.Pos, OpMul64, typ.Int) - v1 := b.NewValue0(v.Pos, OpConst64, typ.Int) - v1.AuxInt = int64ToAuxInt(t.Elem().Size()) - v0.AddArg2(idx, v1) - v.AddArg2(ptr, v0) + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd64 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_0_1.AuxInt) + if !(c&7 == 7) { + continue + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpNeg64, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft8 x (Neg32 (And32 y (Const32 [c])))) + // cond: c&7 == 7 + // result: (RotateLeft8 x (Neg32 y)) + for { + x := v_0 + if v_1.Op != OpNeg32 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd32 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_0_1.AuxInt) + if !(c&7 == 7) { + continue + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpNeg32, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft8 x (Neg16 (And16 y (Const16 [c])))) + // cond: c&7 == 7 + // result: (RotateLeft8 x (Neg16 y)) + for { + x := v_0 + if v_1.Op != OpNeg16 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd16 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_0_1.AuxInt) + if !(c&7 == 7) { + continue + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpNeg16, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft8 x (Neg8 (And8 y (Const8 [c])))) + // cond: c&7 == 7 + // result: (RotateLeft8 x (Neg8 y)) + for { + x := v_0 + if v_1.Op != OpNeg8 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAnd8 { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + v_1_0_1 := v_1_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 { + y := v_1_0_0 + if v_1_0_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_0_1.AuxInt) + if !(c&7 == 7) { + continue + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpNeg8, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) + return true + } + break + } + // match: (RotateLeft8 x (Add64 y (Const64 [c]))) + // cond: c&7 == 0 + // result: (RotateLeft8 x y) + for { + x := v_0 + if v_1.Op != OpAdd64 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst64 { + continue + } + c := auxIntToInt64(v_1_1.AuxInt) + if !(c&7 == 0) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft8 x (Add32 y (Const32 [c]))) + // cond: c&7 == 0 + // result: (RotateLeft8 x y) + for { + x := v_0 + if v_1.Op != OpAdd32 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 
= _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_1_1.AuxInt) + if !(c&7 == 0) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft8 x (Add16 y (Const16 [c]))) + // cond: c&7 == 0 + // result: (RotateLeft8 x y) + for { + x := v_0 + if v_1.Op != OpAdd16 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_1_1.AuxInt) + if !(c&7 == 0) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft8 x (Add8 y (Const8 [c]))) + // cond: c&7 == 0 + // result: (RotateLeft8 x y) + for { + x := v_0 + if v_1.Op != OpAdd8 { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 { + y := v_1_0 + if v_1_1.Op != OpConst8 { + continue + } + c := auxIntToInt8(v_1_1.AuxInt) + if !(c&7 == 0) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (RotateLeft8 x (Sub64 (Const64 [c]) y)) + // cond: c&7 == 0 + // result: (RotateLeft8 x (Neg64 y)) + for { + x := v_0 + if v_1.Op != OpSub64 { + break + } + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_1_0.AuxInt) + if !(c&7 == 0) { + break + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpNeg64, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) return true } - return false -} -func rewriteValuegeneric_OpRotateLeft16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (RotateLeft16 x (Const16 [c])) - // cond: c%16 == 0 - // result: x + // match: (RotateLeft8 x (Sub32 (Const32 [c]) y)) + // cond: c&7 == 0 + // result: (RotateLeft8 x (Neg32 y)) for { x := v_0 - if v_1.Op != OpConst16 { + if v_1.Op != OpSub32 { break } - c := auxIntToInt16(v_1.AuxInt) - if !(c%16 == 0) { + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { break } - v.copyOf(x) + c := auxIntToInt32(v_1_0.AuxInt) + if !(c&7 == 0) { + break + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpNeg32, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) return true } - return false -} -func rewriteValuegeneric_OpRotateLeft32(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (RotateLeft32 x (Const32 [c])) - // cond: c%32 == 0 - // result: x + // match: (RotateLeft8 x (Sub16 (Const16 [c]) y)) + // cond: c&7 == 0 + // result: (RotateLeft8 x (Neg16 y)) for { x := v_0 - if v_1.Op != OpConst32 { + if v_1.Op != OpSub16 { break } - c := auxIntToInt32(v_1.AuxInt) - if !(c%32 == 0) { + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst16 { break } - v.copyOf(x) + c := auxIntToInt16(v_1_0.AuxInt) + if !(c&7 == 0) { + break + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpNeg16, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) return true } - return false -} -func rewriteValuegeneric_OpRotateLeft64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (RotateLeft64 x (Const64 [c])) - // cond: c%64 == 0 - // result: x + // match: (RotateLeft8 x (Sub8 (Const8 [c]) y)) + // cond: c&7 == 0 + // result: (RotateLeft8 x (Neg8 y)) for { x := v_0 - if v_1.Op != OpConst64 { + if v_1.Op != OpSub8 { break } - c := auxIntToInt64(v_1.AuxInt) - if !(c%64 == 0) { + y := v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst8 { break } - 
v.copyOf(x) + c := auxIntToInt8(v_1_0.AuxInt) + if !(c&7 == 0) { + break + } + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpNeg8, y.Type) + v0.AddArg(y) + v.AddArg2(x, v0) return true } - return false -} -func rewriteValuegeneric_OpRotateLeft8(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - // match: (RotateLeft8 x (Const8 [c])) - // cond: c%8 == 0 - // result: x + // match: (RotateLeft8 x (Const64 [c])) + // cond: config.PtrSize == 4 + // result: (RotateLeft8 x (Const32 [int32(c)])) for { x := v_0 - if v_1.Op != OpConst8 { + if v_1.Op != OpConst64 { break } - c := auxIntToInt8(v_1.AuxInt) - if !(c%8 == 0) { + t := v_1.Type + c := auxIntToInt64(v_1.AuxInt) + if !(config.PtrSize == 4) { break } - v.copyOf(x) + v.reset(OpRotateLeft8) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(x, v0) return true } return false @@ -24849,40 +29117,286 @@ func rewriteValuegeneric_OpTrunc32to16(v *Value) bool { break } x := v_0.Args[0] - v.reset(OpSignExt8to16) + v.reset(OpSignExt8to16) + v.AddArg(x) + return true + } + // match: (Trunc32to16 (SignExt16to32 x)) + // result: x + for { + if v_0.Op != OpSignExt16to32 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Trunc32to16 (And32 (Const32 [y]) x)) + // cond: y&0xFFFF == 0xFFFF + // result: (Trunc32to16 x) + for { + if v_0.Op != OpAnd32 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst32 { + continue + } + y := auxIntToInt32(v_0_0.AuxInt) + x := v_0_1 + if !(y&0xFFFF == 0xFFFF) { + continue + } + v.reset(OpTrunc32to16) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpTrunc32to8(v *Value) bool { + v_0 := v.Args[0] + // match: (Trunc32to8 (Const32 [c])) + // result: (Const8 [int8(c)]) + for { + if v_0.Op != OpConst32 { + break + } + c := auxIntToInt32(v_0.AuxInt) + v.reset(OpConst8) + v.AuxInt = int8ToAuxInt(int8(c)) + return true + } + // match: (Trunc32to8 (ZeroExt8to32 x)) + // result: x + for { + if v_0.Op != OpZeroExt8to32 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Trunc32to8 (SignExt8to32 x)) + // result: x + for { + if v_0.Op != OpSignExt8to32 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Trunc32to8 (And32 (Const32 [y]) x)) + // cond: y&0xFF == 0xFF + // result: (Trunc32to8 x) + for { + if v_0.Op != OpAnd32 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst32 { + continue + } + y := auxIntToInt32(v_0_0.AuxInt) + x := v_0_1 + if !(y&0xFF == 0xFF) { + continue + } + v.reset(OpTrunc32to8) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpTrunc64to16(v *Value) bool { + v_0 := v.Args[0] + // match: (Trunc64to16 (Const64 [c])) + // result: (Const16 [int16(c)]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(int16(c)) + return true + } + // match: (Trunc64to16 (ZeroExt8to64 x)) + // result: (ZeroExt8to16 x) + for { + if v_0.Op != OpZeroExt8to64 { + break + } + x := v_0.Args[0] + v.reset(OpZeroExt8to16) + v.AddArg(x) + return true + } + // match: (Trunc64to16 (ZeroExt16to64 x)) + // result: x + for { + if v_0.Op != OpZeroExt16to64 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: 
(Trunc64to16 (SignExt8to64 x)) + // result: (SignExt8to16 x) + for { + if v_0.Op != OpSignExt8to64 { + break + } + x := v_0.Args[0] + v.reset(OpSignExt8to16) + v.AddArg(x) + return true + } + // match: (Trunc64to16 (SignExt16to64 x)) + // result: x + for { + if v_0.Op != OpSignExt16to64 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Trunc64to16 (And64 (Const64 [y]) x)) + // cond: y&0xFFFF == 0xFFFF + // result: (Trunc64to16 x) + for { + if v_0.Op != OpAnd64 { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + if v_0_0.Op != OpConst64 { + continue + } + y := auxIntToInt64(v_0_0.AuxInt) + x := v_0_1 + if !(y&0xFFFF == 0xFFFF) { + continue + } + v.reset(OpTrunc64to16) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpTrunc64to32(v *Value) bool { + v_0 := v.Args[0] + // match: (Trunc64to32 (Const64 [c])) + // result: (Const32 [int32(c)]) + for { + if v_0.Op != OpConst64 { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(int32(c)) + return true + } + // match: (Trunc64to32 (ZeroExt8to64 x)) + // result: (ZeroExt8to32 x) + for { + if v_0.Op != OpZeroExt8to64 { + break + } + x := v_0.Args[0] + v.reset(OpZeroExt8to32) + v.AddArg(x) + return true + } + // match: (Trunc64to32 (ZeroExt16to64 x)) + // result: (ZeroExt16to32 x) + for { + if v_0.Op != OpZeroExt16to64 { + break + } + x := v_0.Args[0] + v.reset(OpZeroExt16to32) + v.AddArg(x) + return true + } + // match: (Trunc64to32 (ZeroExt32to64 x)) + // result: x + for { + if v_0.Op != OpZeroExt32to64 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Trunc64to32 (SignExt8to64 x)) + // result: (SignExt8to32 x) + for { + if v_0.Op != OpSignExt8to64 { + break + } + x := v_0.Args[0] + v.reset(OpSignExt8to32) + v.AddArg(x) + return true + } + // match: (Trunc64to32 (SignExt16to64 x)) + // result: (SignExt16to32 x) + for { + if v_0.Op != OpSignExt16to64 { + break + } + x := v_0.Args[0] + v.reset(OpSignExt16to32) v.AddArg(x) return true } - // match: (Trunc32to16 (SignExt16to32 x)) + // match: (Trunc64to32 (SignExt32to64 x)) // result: x for { - if v_0.Op != OpSignExt16to32 { + if v_0.Op != OpSignExt32to64 { break } x := v_0.Args[0] v.copyOf(x) return true } - // match: (Trunc32to16 (And32 (Const32 [y]) x)) - // cond: y&0xFFFF == 0xFFFF - // result: (Trunc32to16 x) + // match: (Trunc64to32 (And64 (Const64 [y]) x)) + // cond: y&0xFFFFFFFF == 0xFFFFFFFF + // result: (Trunc64to32 x) for { - if v_0.Op != OpAnd32 { + if v_0.Op != OpAnd64 { break } _ = v_0.Args[1] v_0_0 := v_0.Args[0] v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - if v_0_0.Op != OpConst32 { + if v_0_0.Op != OpConst64 { continue } - y := auxIntToInt32(v_0_0.AuxInt) + y := auxIntToInt64(v_0_0.AuxInt) x := v_0_1 - if !(y&0xFFFF == 0xFFFF) { + if !(y&0xFFFFFFFF == 0xFFFFFFFF) { continue } - v.reset(OpTrunc32to16) + v.reset(OpTrunc64to32) v.AddArg(x) return true } @@ -24890,512 +29404,1047 @@ func rewriteValuegeneric_OpTrunc32to16(v *Value) bool { } return false } -func rewriteValuegeneric_OpTrunc32to8(v *Value) bool { +func rewriteValuegeneric_OpTrunc64to8(v *Value) bool { v_0 := v.Args[0] - // match: (Trunc32to8 (Const32 [c])) + // match: (Trunc64to8 (Const64 [c])) // result: (Const8 [int8(c)]) for { - if v_0.Op != OpConst32 { + if v_0.Op != OpConst64 { break } - c := auxIntToInt32(v_0.AuxInt) + c := auxIntToInt64(v_0.AuxInt) 
v.reset(OpConst8) v.AuxInt = int8ToAuxInt(int8(c)) return true } - // match: (Trunc32to8 (ZeroExt8to32 x)) + // match: (Trunc64to8 (ZeroExt8to64 x)) // result: x for { - if v_0.Op != OpZeroExt8to32 { + if v_0.Op != OpZeroExt8to64 { break } x := v_0.Args[0] v.copyOf(x) return true } - // match: (Trunc32to8 (SignExt8to32 x)) + // match: (Trunc64to8 (SignExt8to64 x)) // result: x for { - if v_0.Op != OpSignExt8to32 { + if v_0.Op != OpSignExt8to64 { break } x := v_0.Args[0] v.copyOf(x) return true } - // match: (Trunc32to8 (And32 (Const32 [y]) x)) + // match: (Trunc64to8 (And64 (Const64 [y]) x)) // cond: y&0xFF == 0xFF - // result: (Trunc32to8 x) + // result: (Trunc64to8 x) for { - if v_0.Op != OpAnd32 { + if v_0.Op != OpAnd64 { break } _ = v_0.Args[1] v_0_0 := v_0.Args[0] v_0_1 := v_0.Args[1] for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - if v_0_0.Op != OpConst32 { + if v_0_0.Op != OpConst64 { + continue + } + y := auxIntToInt64(v_0_0.AuxInt) + x := v_0_1 + if !(y&0xFF == 0xFF) { + continue + } + v.reset(OpTrunc64to8) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValuegeneric_OpXor16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + // match: (Xor16 (Const16 [c]) (Const16 [d])) + // result: (Const16 [c^d]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpConst16 { + continue + } + d := auxIntToInt16(v_1.AuxInt) + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(c ^ d) + return true + } + break + } + // match: (Xor16 x x) + // result: (Const16 [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(0) + return true + } + // match: (Xor16 (Const16 [0]) x) + // result: x + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + continue + } + x := v_1 + v.copyOf(x) + return true + } + break + } + // match: (Xor16 (Com16 x) x) + // result: (Const16 [-1]) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom16 { + continue + } + x := v_0.Args[0] + if x != v_1 { + continue + } + v.reset(OpConst16) + v.AuxInt = int16ToAuxInt(-1) + return true + } + break + } + // match: (Xor16 (Const16 [-1]) x) + // result: (Com16 x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 { + continue + } + x := v_1 + v.reset(OpCom16) + v.AddArg(x) + return true + } + break + } + // match: (Xor16 x (Xor16 x y)) + // result: y + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpXor16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.copyOf(y) + return true + } + } + break + } + // match: (Xor16 (Xor16 i:(Const16 ) z) x) + // cond: (z.Op != OpConst16 && x.Op != OpConst16) + // result: (Xor16 i (Xor16 z x)) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpXor16 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst16 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst16 && x.Op != OpConst16) { + continue + } + 
v.reset(OpXor16) + v0 := b.NewValue0(v.Pos, OpXor16, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } + } + break + } + // match: (Xor16 (Const16 [c]) (Xor16 (Const16 [d]) x)) + // result: (Xor16 (Const16 [c^d]) x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst16 { + continue + } + t := v_0.Type + c := auxIntToInt16(v_0.AuxInt) + if v_1.Op != OpXor16 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst16 || v_1_0.Type != t { + continue + } + d := auxIntToInt16(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpXor16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(c ^ d) + v.AddArg2(v0, x) + return true + } + } + break + } + // match: (Xor16 (Lsh16x64 x z:(Const64 [c])) (Rsh16Ux64 x (Const64 [d]))) + // cond: c < 16 && d == 16-c && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLsh16x64 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { + continue + } + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh16Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 16 && d == 16-c && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor16 left:(Lsh16x64 x y) right:(Rsh16Ux64 x (Sub64 (Const64 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { continue } - y := auxIntToInt32(v_0_0.AuxInt) - x := v_0_1 - if !(y&0xFF == 0xFF) { + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { continue } - v.reset(OpTrunc32to8) - v.AddArg(x) + v.reset(OpRotateLeft16) + v.AddArg2(x, y) return true } break } - return false -} -func rewriteValuegeneric_OpTrunc64to16(v *Value) bool { - v_0 := v.Args[0] - // match: (Trunc64to16 (Const64 [c])) - // result: (Const16 [int16(c)]) + // match: (Xor16 left:(Lsh16x32 x y) right:(Rsh16Ux32 x (Sub32 (Const32 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) for { - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x32 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + 
v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true } - c := auxIntToInt64(v_0.AuxInt) - v.reset(OpConst16) - v.AuxInt = int16ToAuxInt(int16(c)) - return true + break } - // match: (Trunc64to16 (ZeroExt8to64 x)) - // result: (ZeroExt8to16 x) + // match: (Xor16 left:(Lsh16x16 x y) right:(Rsh16Ux16 x (Sub16 (Const16 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) for { - if v_0.Op != OpZeroExt8to64 { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x16 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true } - x := v_0.Args[0] - v.reset(OpZeroExt8to16) - v.AddArg(x) - return true + break } - // match: (Trunc64to16 (ZeroExt16to64 x)) - // result: x + // match: (Xor16 left:(Lsh16x8 x y) right:(Rsh16Ux8 x (Sub8 (Const8 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x y) for { - if v_0.Op != OpZeroExt16to64 { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh16x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh16Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 16 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, y) + return true } - x := v_0.Args[0] - v.copyOf(x) - return true + break } - // match: (Trunc64to16 (SignExt8to64 x)) - // result: (SignExt8to16 x) + // match: (Xor16 right:(Rsh16Ux64 x y) left:(Lsh16x64 x z:(Sub64 (Const64 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) for { - if v_0.Op != OpSignExt8to64 { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true } - x := v_0.Args[0] - v.reset(OpSignExt8to16) - v.AddArg(x) - return true + break } - // match: (Trunc64to16 (SignExt16to64 x)) - // result: x + // match: (Xor16 right:(Rsh16Ux32 x y) left:(Lsh16x32 x z:(Sub32 (Const32 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) for { - if v_0.Op != OpSignExt16to64 
{ - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true } - x := v_0.Args[0] - v.copyOf(x) - return true + break } - // match: (Trunc64to16 (And64 (Const64 [y]) x)) - // cond: y&0xFFFF == 0xFFFF - // result: (Trunc64to16 x) + // match: (Xor16 right:(Rsh16Ux16 x y) left:(Lsh16x16 x z:(Sub16 (Const16 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) for { - if v_0.Op != OpAnd64 { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux16 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) + return true } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - if v_0_0.Op != OpConst64 { + break + } + // match: (Xor16 right:(Rsh16Ux8 x y) left:(Lsh16x8 x z:(Sub8 (Const8 [16]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) + // result: (RotateLeft16 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh16Ux8 { continue } - y := auxIntToInt64(v_0_0.AuxInt) - x := v_0_1 - if !(y&0xFFFF == 0xFFFF) { + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh16x8 { continue } - v.reset(OpTrunc64to16) - v.AddArg(x) + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 16 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16)) { + continue + } + v.reset(OpRotateLeft16) + v.AddArg2(x, z) return true } break } return false } -func rewriteValuegeneric_OpTrunc64to32(v *Value) bool { +func rewriteValuegeneric_OpXor32(v *Value) bool { + v_1 := v.Args[1] v_0 := v.Args[0] - // match: (Trunc64to32 (Const64 [c])) - // result: (Const32 [int32(c)]) - for { - if v_0.Op != OpConst64 { - break - } - c := auxIntToInt64(v_0.AuxInt) - v.reset(OpConst32) - v.AuxInt = int32ToAuxInt(int32(c)) - return true - } - // match: (Trunc64to32 (ZeroExt8to64 x)) - // result: (ZeroExt8to32 x) + b := v.Block + config := b.Func.Config + // match: (Xor32 (Const32 [c]) (Const32 [d])) + // result: (Const32 [c^d]) for { - if v_0.Op != OpZeroExt8to64 { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpConst32 { + continue + } + d := 
auxIntToInt32(v_1.AuxInt) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(c ^ d) + return true } - x := v_0.Args[0] - v.reset(OpZeroExt8to32) - v.AddArg(x) - return true + break } - // match: (Trunc64to32 (ZeroExt16to64 x)) - // result: (ZeroExt16to32 x) + // match: (Xor32 x x) + // result: (Const32 [0]) for { - if v_0.Op != OpZeroExt16to64 { + x := v_0 + if x != v_1 { break } - x := v_0.Args[0] - v.reset(OpZeroExt16to32) - v.AddArg(x) + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(0) return true } - // match: (Trunc64to32 (ZeroExt32to64 x)) + // match: (Xor32 (Const32 [0]) x) // result: x for { - if v_0.Op != OpZeroExt32to64 { - break - } - x := v_0.Args[0] - v.copyOf(x) - return true - } - // match: (Trunc64to32 (SignExt8to64 x)) - // result: (SignExt8to32 x) - for { - if v_0.Op != OpSignExt8to64 { - break - } - x := v_0.Args[0] - v.reset(OpSignExt8to32) - v.AddArg(x) - return true - } - // match: (Trunc64to32 (SignExt16to64 x)) - // result: (SignExt16to32 x) - for { - if v_0.Op != OpSignExt16to64 { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + continue + } + x := v_1 + v.copyOf(x) + return true } - x := v_0.Args[0] - v.reset(OpSignExt16to32) - v.AddArg(x) - return true + break } - // match: (Trunc64to32 (SignExt32to64 x)) - // result: x + // match: (Xor32 (Com32 x) x) + // result: (Const32 [-1]) for { - if v_0.Op != OpSignExt32to64 { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpCom32 { + continue + } + x := v_0.Args[0] + if x != v_1 { + continue + } + v.reset(OpConst32) + v.AuxInt = int32ToAuxInt(-1) + return true } - x := v_0.Args[0] - v.copyOf(x) - return true + break } - // match: (Trunc64to32 (And64 (Const64 [y]) x)) - // cond: y&0xFFFFFFFF == 0xFFFFFFFF - // result: (Trunc64to32 x) + // match: (Xor32 (Const32 [-1]) x) + // result: (Com32 x) for { - if v_0.Op != OpAnd64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - if v_0_0.Op != OpConst64 { - continue - } - y := auxIntToInt64(v_0_0.AuxInt) - x := v_0_1 - if !(y&0xFFFFFFFF == 0xFFFFFFFF) { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 { continue } - v.reset(OpTrunc64to32) + x := v_1 + v.reset(OpCom32) v.AddArg(x) return true } break } - return false -} -func rewriteValuegeneric_OpTrunc64to8(v *Value) bool { - v_0 := v.Args[0] - // match: (Trunc64to8 (Const64 [c])) - // result: (Const8 [int8(c)]) + // match: (Xor32 x (Xor32 x y)) + // result: y for { - if v_0.Op != OpConst64 { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpXor32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if x != v_1_0 { + continue + } + y := v_1_1 + v.copyOf(y) + return true + } } - c := auxIntToInt64(v_0.AuxInt) - v.reset(OpConst8) - v.AuxInt = int8ToAuxInt(int8(c)) - return true + break } - // match: (Trunc64to8 (ZeroExt8to64 x)) - // result: x + // match: (Xor32 (Xor32 i:(Const32 ) z) x) + // cond: (z.Op != OpConst32 && x.Op != OpConst32) + // result: (Xor32 i (Xor32 z x)) for { - if v_0.Op != OpZeroExt8to64 { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpXor32 { + continue + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i1 := 0; _i1 <= 1; _i1, 
v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { + i := v_0_0 + if i.Op != OpConst32 { + continue + } + t := i.Type + z := v_0_1 + x := v_1 + if !(z.Op != OpConst32 && x.Op != OpConst32) { + continue + } + v.reset(OpXor32) + v0 := b.NewValue0(v.Pos, OpXor32, t) + v0.AddArg2(z, x) + v.AddArg2(i, v0) + return true + } } - x := v_0.Args[0] - v.copyOf(x) - return true + break } - // match: (Trunc64to8 (SignExt8to64 x)) - // result: x + // match: (Xor32 (Const32 [c]) (Xor32 (Const32 [d]) x)) + // result: (Xor32 (Const32 [c^d]) x) for { - if v_0.Op != OpSignExt8to64 { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpConst32 { + continue + } + t := v_0.Type + c := auxIntToInt32(v_0.AuxInt) + if v_1.Op != OpXor32 { + continue + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + v_1_1 := v_1.Args[1] + for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { + if v_1_0.Op != OpConst32 || v_1_0.Type != t { + continue + } + d := auxIntToInt32(v_1_0.AuxInt) + x := v_1_1 + v.reset(OpXor32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(c ^ d) + v.AddArg2(v0, x) + return true + } } - x := v_0.Args[0] - v.copyOf(x) - return true + break } - // match: (Trunc64to8 (And64 (Const64 [y]) x)) - // cond: y&0xFF == 0xFF - // result: (Trunc64to8 x) + // match: (Xor32 (Lsh32x64 x z:(Const64 [c])) (Rsh32Ux64 x (Const64 [d]))) + // cond: c < 32 && d == 32-c && canRotate(config, 32) + // result: (RotateLeft32 x z) for { - if v_0.Op != OpAnd64 { - break - } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { - if v_0_0.Op != OpConst64 { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLsh32x64 { continue } - y := auxIntToInt64(v_0_0.AuxInt) - x := v_0_1 - if !(y&0xFF == 0xFF) { + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { continue } - v.reset(OpTrunc64to8) - v.AddArg(x) + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh32Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 32 && d == 32-c && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) return true } break } - return false -} -func rewriteValuegeneric_OpXor16(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Xor16 (Const16 [c]) (Const16 [d])) - // result: (Const16 [c^d]) + // match: (Xor32 left:(Lsh32x64 x y) right:(Rsh32Ux64 x (Sub64 (Const64 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst16 { + left := v_0 + if left.Op != OpLsh32x64 { continue } - c := auxIntToInt16(v_0.AuxInt) - if v_1.Op != OpConst16 { + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux64 { continue } - d := auxIntToInt16(v_1.AuxInt) - v.reset(OpConst16) - v.AuxInt = int16ToAuxInt(c ^ d) + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) return 
true } break } - // match: (Xor16 x x) - // result: (Const16 [0]) + // match: (Xor32 left:(Lsh32x32 x y) right:(Rsh32Ux32 x (Sub32 (Const32 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) for { - x := v_0 - if x != v_1 { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh32x32 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) + return true } - v.reset(OpConst16) - v.AuxInt = int16ToAuxInt(0) - return true + break } - // match: (Xor16 (Const16 [0]) x) - // result: x + // match: (Xor32 left:(Lsh32x16 x y) right:(Rsh32Ux16 x (Sub16 (Const16 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 { + left := v_0 + if left.Op != OpLsh32x16 { continue } - x := v_1 - v.copyOf(x) + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) return true } break } - // match: (Xor16 (Com16 x) x) - // result: (Const16 [-1]) + // match: (Xor32 left:(Lsh32x8 x y) right:(Rsh32Ux8 x (Sub8 (Const8 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpCom16 { + left := v_0 + if left.Op != OpLsh32x8 { continue } - x := v_0.Args[0] - if x != v_1 { + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh32Ux8 { continue } - v.reset(OpConst16) - v.AuxInt = int16ToAuxInt(-1) + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 32 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, y) return true } break } - // match: (Xor16 (Const16 [-1]) x) - // result: (Com16 x) + // match: (Xor32 right:(Rsh32Ux64 x y) left:(Lsh32x64 x z:(Sub64 (Const64 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 { + right := v_0 + if right.Op != OpRsh32Ux64 { continue } - x := v_1 - v.reset(OpCom16) - v.AddArg(x) + y 
:= right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) return true } break } - // match: (Xor16 x (Xor16 x y)) - // result: y + // match: (Xor32 right:(Rsh32Ux32 x y) left:(Lsh32x32 x z:(Sub32 (Const32 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpXor16 { + right := v_0 + if right.Op != OpRsh32Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { continue } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if x != v_1_0 { - continue - } - y := v_1_1 - v.copyOf(y) - return true + z := left.Args[1] + if z.Op != OpSub32 { + continue } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue + } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true } break } - // match: (Xor16 (Xor16 i:(Const16 ) z) x) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Xor16 i (Xor16 z x)) + // match: (Xor32 right:(Rsh32Ux16 x y) left:(Lsh32x16 x z:(Sub16 (Const16 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpXor16 { + right := v_0 + if right.Op != OpRsh32Ux16 { continue } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { - i := v_0_0 - if i.Op != OpConst16 { - continue - } - t := i.Type - z := v_0_1 - x := v_1 - if !(z.Op != OpConst16 && x.Op != OpConst16) { - continue - } - v.reset(OpXor16) - v0 := b.NewValue0(v.Pos, OpXor16, t) - v0.AddArg2(z, x) - v.AddArg2(i, v0) - return true + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh32x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true } break } - // match: (Xor16 (Const16 [c]) (Xor16 (Const16 [d]) x)) - // result: (Xor16 (Const16 [c^d]) x) + // match: (Xor32 right:(Rsh32Ux8 x y) left:(Lsh32x8 x z:(Sub8 (Const8 [32]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) + // result: (RotateLeft32 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst16 { + right := v_0 + if right.Op != OpRsh32Ux8 { continue } - t := v_0.Type - c := auxIntToInt16(v_0.AuxInt) - if v_1.Op != OpXor16 { + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != 
OpLsh32x8 { continue } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst16 || v_1_0.Type != t { - continue - } - d := auxIntToInt16(v_1_0.AuxInt) - x := v_1_1 - v.reset(OpXor16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int16ToAuxInt(c ^ d) - v.AddArg2(v0, x) - return true + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 32 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32)) { + continue } + v.reset(OpRotateLeft32) + v.AddArg2(x, z) + return true } break } return false } -func rewriteValuegeneric_OpXor32(v *Value) bool { +func rewriteValuegeneric_OpXor64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - // match: (Xor32 (Const32 [c]) (Const32 [d])) - // result: (Const32 [c^d]) + config := b.Func.Config + // match: (Xor64 (Const64 [c]) (Const64 [d])) + // result: (Const64 [c^d]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst32 { + if v_0.Op != OpConst64 { continue } - c := auxIntToInt32(v_0.AuxInt) - if v_1.Op != OpConst32 { + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpConst64 { continue } - d := auxIntToInt32(v_1.AuxInt) - v.reset(OpConst32) - v.AuxInt = int32ToAuxInt(c ^ d) + d := auxIntToInt64(v_1.AuxInt) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(c ^ d) return true } break } - // match: (Xor32 x x) - // result: (Const32 [0]) + // match: (Xor64 x x) + // result: (Const64 [0]) for { x := v_0 if x != v_1 { break } - v.reset(OpConst32) - v.AuxInt = int32ToAuxInt(0) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(0) return true } - // match: (Xor32 (Const32 [0]) x) + // match: (Xor64 (Const64 [0]) x) // result: x for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { continue } x := v_1 @@ -25404,43 +30453,43 @@ func rewriteValuegeneric_OpXor32(v *Value) bool { } break } - // match: (Xor32 (Com32 x) x) - // result: (Const32 [-1]) + // match: (Xor64 (Com64 x) x) + // result: (Const64 [-1]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpCom32 { + if v_0.Op != OpCom64 { continue } x := v_0.Args[0] if x != v_1 { continue } - v.reset(OpConst32) - v.AuxInt = int32ToAuxInt(-1) + v.reset(OpConst64) + v.AuxInt = int64ToAuxInt(-1) return true } break } - // match: (Xor32 (Const32 [-1]) x) - // result: (Com32 x) + // match: (Xor64 (Const64 [-1]) x) + // result: (Com64 x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 { continue } x := v_1 - v.reset(OpCom32) + v.reset(OpCom64) v.AddArg(x) return true } break } - // match: (Xor32 x (Xor32 x y)) + // match: (Xor64 x (Xor64 x y)) // result: y for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpXor32 { + if v_1.Op != OpXor64 { continue } _ = v_1.Args[1] @@ -25457,12 +30506,12 @@ func rewriteValuegeneric_OpXor32(v *Value) bool { } break } - // match: (Xor32 (Xor32 i:(Const32 ) z) x) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Xor32 i (Xor32 z x)) + // match: (Xor64 (Xor64 i:(Const64 ) z) x) + // cond: (z.Op != OpConst64 
&& x.Op != OpConst64) + // result: (Xor64 i (Xor64 z x)) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpXor32 { + if v_0.Op != OpXor64 { continue } _ = v_0.Args[1] @@ -25470,17 +30519,17 @@ func rewriteValuegeneric_OpXor32(v *Value) bool { v_0_1 := v_0.Args[1] for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { i := v_0_0 - if i.Op != OpConst32 { + if i.Op != OpConst64 { continue } t := i.Type z := v_0_1 x := v_1 - if !(z.Op != OpConst32 && x.Op != OpConst32) { + if !(z.Op != OpConst64 && x.Op != OpConst64) { continue } - v.reset(OpXor32) - v0 := b.NewValue0(v.Pos, OpXor32, t) + v.reset(OpXor64) + v0 := b.NewValue0(v.Pos, OpXor64, t) v0.AddArg2(z, x) v.AddArg2(i, v0) return true @@ -25488,195 +30537,341 @@ func rewriteValuegeneric_OpXor32(v *Value) bool { } break } - // match: (Xor32 (Const32 [c]) (Xor32 (Const32 [d]) x)) - // result: (Xor32 (Const32 [c^d]) x) + // match: (Xor64 (Const64 [c]) (Xor64 (Const64 [d]) x)) + // result: (Xor64 (Const64 [c^d]) x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst32 { + if v_0.Op != OpConst64 { continue } t := v_0.Type - c := auxIntToInt32(v_0.AuxInt) - if v_1.Op != OpXor32 { + c := auxIntToInt64(v_0.AuxInt) + if v_1.Op != OpXor64 { continue } _ = v_1.Args[1] v_1_0 := v_1.Args[0] v_1_1 := v_1.Args[1] for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst32 || v_1_0.Type != t { + if v_1_0.Op != OpConst64 || v_1_0.Type != t { continue } - d := auxIntToInt32(v_1_0.AuxInt) + d := auxIntToInt64(v_1_0.AuxInt) x := v_1_1 - v.reset(OpXor32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int32ToAuxInt(c ^ d) + v.reset(OpXor64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(c ^ d) v.AddArg2(v0, x) return true } } break } - return false -} -func rewriteValuegeneric_OpXor64(v *Value) bool { - v_1 := v.Args[1] - v_0 := v.Args[0] - b := v.Block - // match: (Xor64 (Const64 [c]) (Const64 [d])) - // result: (Const64 [c^d]) + // match: (Xor64 (Lsh64x64 x z:(Const64 [c])) (Rsh64Ux64 x (Const64 [d]))) + // cond: c < 64 && d == 64-c && canRotate(config, 64) + // result: (RotateLeft64 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst64 { + if v_0.Op != OpLsh64x64 { continue } - c := auxIntToInt64(v_0.AuxInt) - if v_1.Op != OpConst64 { + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { continue } - d := auxIntToInt64(v_1.AuxInt) - v.reset(OpConst64) - v.AuxInt = int64ToAuxInt(c ^ d) + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh64Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 64 && d == 64-c && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) return true } break } - // match: (Xor64 x x) - // result: (Const64 [0]) + // match: (Xor64 left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) for { - x := v_0 - if x != v_1 { - break + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh64x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != 
OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true } - v.reset(OpConst64) - v.AuxInt = int64ToAuxInt(0) - return true + break } - // match: (Xor64 (Const64 [0]) x) - // result: x + // match: (Xor64 left:(Lsh64x32 x y) right:(Rsh64Ux32 x (Sub32 (Const32 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 { + left := v_0 + if left.Op != OpLsh64x32 { continue } - x := v_1 - v.copyOf(x) + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) return true } break } - // match: (Xor64 (Com64 x) x) - // result: (Const64 [-1]) + // match: (Xor64 left:(Lsh64x16 x y) right:(Rsh64Ux16 x (Sub16 (Const16 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpCom64 { + left := v_0 + if left.Op != OpLsh64x16 { continue } - x := v_0.Args[0] - if x != v_1 { + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor64 left:(Lsh64x8 x y) right:(Rsh64Ux8 x (Sub8 (Const8 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh64x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh64Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 64 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { continue } - v.reset(OpConst64) - v.AuxInt = int64ToAuxInt(-1) + v.reset(OpRotateLeft64) + v.AddArg2(x, y) return true } break } - // match: (Xor64 (Const64 [-1]) x) - // result: (Com64 x) + // match: (Xor64 right:(Rsh64Ux64 x y) left:(Lsh64x64 x z:(Sub64 (Const64 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: 
(RotateLeft64 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 { + right := v_0 + if right.Op != OpRsh64Ux64 { continue } - x := v_1 - v.reset(OpCom64) - v.AddArg(x) + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) return true } break } - // match: (Xor64 x (Xor64 x y)) - // result: y + // match: (Xor64 right:(Rsh64Ux32 x y) left:(Lsh64x32 x z:(Sub32 (Const32 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - x := v_0 - if v_1.Op != OpXor64 { + right := v_0 + if right.Op != OpRsh64Ux32 { continue } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if x != v_1_0 { - continue - } - y := v_1_1 - v.copyOf(y) - return true + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x32 { + continue } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue + } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true } break } - // match: (Xor64 (Xor64 i:(Const64 ) z) x) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Xor64 i (Xor64 z x)) + // match: (Xor64 right:(Rsh64Ux16 x y) left:(Lsh64x16 x z:(Sub16 (Const16 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_0.Op != OpXor64 { + right := v_0 + if right.Op != OpRsh64Ux16 { continue } - _ = v_0.Args[1] - v_0_0 := v_0.Args[0] - v_0_1 := v_0.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 { - i := v_0_0 - if i.Op != OpConst64 { - continue - } - t := i.Type - z := v_0_1 - x := v_1 - if !(z.Op != OpConst64 && x.Op != OpConst64) { - continue - } - v.reset(OpXor64) - v0 := b.NewValue0(v.Pos, OpXor64, t) - v0.AddArg2(z, x) - v.AddArg2(i, v0) - return true + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x16 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub16 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true } break } - // match: (Xor64 (Const64 [c]) (Xor64 (Const64 [d]) x)) - // result: (Xor64 (Const64 [c^d]) x) + // match: (Xor64 right:(Rsh64Ux8 x y) left:(Lsh64x8 x z:(Sub8 (Const8 [64]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) + // result: (RotateLeft64 x z) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 
= _i0+1, v_1, v_0 { - if v_0.Op != OpConst64 { + right := v_0 + if right.Op != OpRsh64Ux8 { continue } - t := v_0.Type - c := auxIntToInt64(v_0.AuxInt) - if v_1.Op != OpXor64 { + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh64x8 { continue } - _ = v_1.Args[1] - v_1_0 := v_1.Args[0] - v_1_1 := v_1.Args[1] - for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 { - if v_1_0.Op != OpConst64 || v_1_0.Type != t { - continue - } - d := auxIntToInt64(v_1_0.AuxInt) - x := v_1_1 - v.reset(OpXor64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = int64ToAuxInt(c ^ d) - v.AddArg2(v0, x) - return true + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub8 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 64 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64)) { + continue } + v.reset(OpRotateLeft64) + v.AddArg2(x, z) + return true } break } @@ -25686,6 +30881,7 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block + config := b.Func.Config // match: (Xor8 (Const8 [c]) (Const8 [d])) // result: (Const8 [c^d]) for { @@ -25842,6 +31038,314 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { } break } + // match: (Xor8 (Lsh8x64 x z:(Const64 [c])) (Rsh8Ux64 x (Const64 [d]))) + // cond: c < 8 && d == 8-c && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + if v_0.Op != OpLsh8x64 { + continue + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + if z.Op != OpConst64 { + continue + } + c := auxIntToInt64(z.AuxInt) + if v_1.Op != OpRsh8Ux64 { + continue + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + continue + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + continue + } + d := auxIntToInt64(v_1_1.AuxInt) + if !(c < 8 && d == 8-c && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor8 left:(Lsh8x64 x y) right:(Rsh8Ux64 x (Sub64 (Const64 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x64 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux64 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub64 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst64 || auxIntToInt64(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor8 left:(Lsh8x32 x y) right:(Rsh8Ux32 x (Sub32 (Const32 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x32 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux32 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub32 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if 
right_1_0.Op != OpConst32 || auxIntToInt32(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor8 left:(Lsh8x16 x y) right:(Rsh8Ux16 x (Sub16 (Const16 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x16 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux16 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub16 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst16 || auxIntToInt16(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor8 left:(Lsh8x8 x y) right:(Rsh8Ux8 x (Sub8 (Const8 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + left := v_0 + if left.Op != OpLsh8x8 { + continue + } + y := left.Args[1] + x := left.Args[0] + right := v_1 + if right.Op != OpRsh8Ux8 { + continue + } + _ = right.Args[1] + if x != right.Args[0] { + continue + } + right_1 := right.Args[1] + if right_1.Op != OpSub8 { + continue + } + _ = right_1.Args[1] + right_1_0 := right_1.Args[0] + if right_1_0.Op != OpConst8 || auxIntToInt8(right_1_0.AuxInt) != 8 || y != right_1.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, y) + return true + } + break + } + // match: (Xor8 right:(Rsh8Ux64 x y) left:(Lsh8x64 x z:(Sub64 (Const64 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux64 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x64 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub64 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst64 || auxIntToInt64(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } + v.reset(OpRotateLeft8) + v.AddArg2(x, z) + return true + } + break + } + // match: (Xor8 right:(Rsh8Ux32 x y) left:(Lsh8x32 x z:(Sub32 (Const32 [8]) y))) + // cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) + // result: (RotateLeft8 x z) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + right := v_0 + if right.Op != OpRsh8Ux32 { + continue + } + y := right.Args[1] + x := right.Args[0] + left := v_1 + if left.Op != OpLsh8x32 { + continue + } + _ = left.Args[1] + if x != left.Args[0] { + continue + } + z := left.Args[1] + if z.Op != OpSub32 { + continue + } + _ = z.Args[1] + z_0 := z.Args[0] + if z_0.Op != OpConst32 || auxIntToInt32(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) { + continue + } 
+			v.reset(OpRotateLeft8)
+			v.AddArg2(x, z)
+			return true
+		}
+		break
+	}
+	// match: (Xor8 right:(Rsh8Ux16 x y) left:(Lsh8x16 x z:(Sub16 (Const16 [8]) y)))
+	// cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+	// result: (RotateLeft8 x z)
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			right := v_0
+			if right.Op != OpRsh8Ux16 {
+				continue
+			}
+			y := right.Args[1]
+			x := right.Args[0]
+			left := v_1
+			if left.Op != OpLsh8x16 {
+				continue
+			}
+			_ = left.Args[1]
+			if x != left.Args[0] {
+				continue
+			}
+			z := left.Args[1]
+			if z.Op != OpSub16 {
+				continue
+			}
+			_ = z.Args[1]
+			z_0 := z.Args[0]
+			if z_0.Op != OpConst16 || auxIntToInt16(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+				continue
+			}
+			v.reset(OpRotateLeft8)
+			v.AddArg2(x, z)
+			return true
+		}
+		break
+	}
+	// match: (Xor8 right:(Rsh8Ux8 x y) left:(Lsh8x8 x z:(Sub8 (Const8 [8]) y)))
+	// cond: (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)
+	// result: (RotateLeft8 x z)
+	for {
+		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+			right := v_0
+			if right.Op != OpRsh8Ux8 {
+				continue
+			}
+			y := right.Args[1]
+			x := right.Args[0]
+			left := v_1
+			if left.Op != OpLsh8x8 {
+				continue
+			}
+			_ = left.Args[1]
+			if x != left.Args[0] {
+				continue
+			}
+			z := left.Args[1]
+			if z.Op != OpSub8 {
+				continue
+			}
+			_ = z.Args[1]
+			z_0 := z.Args[0]
+			if z_0.Op != OpConst8 || auxIntToInt8(z_0.AuxInt) != 8 || y != z.Args[1] || !((shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8)) {
+				continue
+			}
+			v.reset(OpRotateLeft8)
+			v.AddArg2(x, z)
+			return true
+		}
+		break
+	}
 	return false
 }
 func rewriteValuegeneric_OpZero(v *Value) bool {
diff --git a/src/cmd/compile/internal/test/shift_test.go b/src/cmd/compile/internal/test/shift_test.go
index 58c8dde1a0..278a47da29 100644
--- a/src/cmd/compile/internal/test/shift_test.go
+++ b/src/cmd/compile/internal/test/shift_test.go
@@ -1039,3 +1039,25 @@ func BenchmarkShiftArithmeticRight(b *testing.B) {
 	}
 	shiftSink64 = x
 }
+
+//go:noinline
+func incorrectRotate1(x, c uint64) uint64 {
+	// This should not compile to a rotate instruction.
+	return x<<c | x>>(64-c)
+}
+
+//go:noinline
+func incorrectRotate2(x uint64) uint64 {
+	var c uint64 = 66
+	// This should not compile to a rotate instruction.
+	return x<<c | x>>(64-c)
+}
+
+func TestIncorrectRotate(t *testing.T) {
+	if got := incorrectRotate1(1, 66); got != 0 {
+		t.Errorf("got %x want 0", got)
+	}
+	if got := incorrectRotate2(1); got != 0 {
+		t.Errorf("got %x want 0", got)
+	}
+}
diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go
index 378100b162..90bb0b9c09 100644
--- a/src/cmd/compile/internal/x86/ssa.go
+++ b/src/cmd/compile/internal/x86/ssa.go
@@ -158,6 +158,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
 		ssa.Op386SHLL, ssa.Op386SHRL, ssa.Op386SHRW, ssa.Op386SHRB,
 		ssa.Op386SARL, ssa.Op386SARW, ssa.Op386SARB,
+		ssa.Op386ROLL, ssa.Op386ROLW, ssa.Op386ROLB,
 		ssa.Op386ADDSS, ssa.Op386ADDSD, ssa.Op386SUBSS, ssa.Op386SUBSD,
 		ssa.Op386MULSS, ssa.Op386MULSD, ssa.Op386DIVSS, ssa.Op386DIVSD,
 		ssa.Op386PXOR,
diff --git a/test/codegen/rotate.go b/test/codegen/rotate.go
index 204efaeafc..5876050ba0 100644
--- a/test/codegen/rotate.go
+++ b/test/codegen/rotate.go
@@ -39,8 +39,8 @@ func rot64(x uint64) uint64 {
 	// s390x:"RISBGZ\t[$]0, [$]63, [$]10, "
 	// ppc64:"ROTL\t[$]10"
 	// ppc64le:"ROTL\t[$]10"
-	// arm64:"ROR\t[$]57" // TODO this is not great line numbering, but then again, the instruction did appear
-	// s390x:"RISBGZ\t[$]0, [$]63, [$]7, " // TODO ditto
+	// arm64:"ROR\t[$]54"
+	// s390x:"RISBGZ\t[$]0, [$]63, [$]10, "
 	a += bits.RotateLeft64(x, 10)

 	return a
@@ -77,8 +77,8 @@ func rot32(x uint32) uint32 {
 	// s390x:"RLL\t[$]10"
 	// ppc64:"ROTLW\t[$]10"
 	// ppc64le:"ROTLW\t[$]10"
-	// arm64:"RORW\t[$]25" // TODO this is not great line numbering, but then again, the instruction did appear
-	// s390x:"RLL\t[$]7" // TODO ditto
+	// arm64:"RORW\t[$]22"
+	// s390x:"RLL\t[$]10"
 	a += bits.RotateLeft32(x, 10)

 	return a
@@ -123,12 +123,16 @@ func rot64nc(x uint64, z uint) uint64 {

 	z &= 63

-	// amd64:"ROLQ"
-	// ppc64:"ROTL"
-	// ppc64le:"ROTL"
+	// amd64:"ROLQ",-"AND"
+	// arm64:"ROR","NEG",-"AND"
+	// ppc64:"ROTL",-"NEG",-"AND"
+	// ppc64le:"ROTL",-"NEG",-"AND"
 	a += x<<z | x>>(64-z)

-	// amd64:"RORQ"
+	// amd64:"RORQ",-"AND"
+	// arm64:"ROR",-"NEG",-"AND"
+	// ppc64:"ROTL","NEG",-"AND"
+	// ppc64le:"ROTL","NEG",-"AND"
 	a += x>>z | x<<(64-z)

 	return a
@@ -139,12 +143,16 @@ func rot32nc(x uint32, z uint) uint32 {

 	z &= 31

-	// amd64:"ROLL"
-	// ppc64:"ROTLW"
-	// ppc64le:"ROTLW"
+	// amd64:"ROLL",-"AND"
+	// arm64:"ROR","NEG",-"AND"
+	// ppc64:"ROTLW",-"NEG",-"AND"
+	// ppc64le:"ROTLW",-"NEG",-"AND"
 	a += x<<z | x>>(32-z)

-	// amd64:"RORL"
+	// amd64:"RORL",-"AND"
+	// arm64:"ROR",-"NEG",-"AND"
+	// ppc64:"ROTLW","NEG",-"AND"
+	// ppc64le:"ROTLW","NEG",-"AND"
 	a += x>>z | x<<(32-z)

 	return a
@@ -155,10 +163,10 @@ func rot16nc(x uint16, z uint) uint16 {

 	z &= 15

-	// amd64:"ROLW"
+	// amd64:"ROLW",-"ANDQ"
 	a += x<<z | x>>(16-z)

-	// amd64:"RORW"
+	// amd64:"RORW",-"ANDQ"
 	a += x>>z | x<<(16-z)

 	return a
@@ -169,10 +177,10 @@ func rot8nc(x uint8, z uint) uint8 {

 	z &= 7

-	// amd64:"ROLB"
+	// amd64:"ROLB",-"ANDQ"
 	a += x<<z | x>>(8-z)

-	// amd64:"RORB"
+	// amd64:"RORB",-"ANDQ"
 	a += x>>z | x<<(8-z)

 	return a
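
A minimal standalone sketch of the distinction the new generic rules and the tests above encode (not part of this CL; the names okRotate and notARotate are illustrative only): a shift pair whose amount is masked to the word size is a genuine rotate and may be combined into a single RotateLeft64, while an unmasked amount must be left alone, which is exactly what incorrectRotate1/incorrectRotate2 pin down.

package main

import "fmt"

// okRotate masks the shift amount, so both shifts are bounded (z < 64) and
// the OR of the two shifts may be rewritten into one RotateLeft64.
func okRotate(x uint64, z uint) uint64 {
	z &= 63
	return x<<z | x>>(64-z)
}

// notARotate leaves z unmasked. In Go, shifting a 64-bit value by 64 or more
// yields 0, so for z == 66 both halves are zero; the expression is not
// equivalent to a rotate and must not be rewritten.
func notARotate(x uint64, z uint) uint64 {
	return x<<z | x>>(64-z)
}

func main() {
	fmt.Printf("%#x\n", okRotate(1, 66))   // 0x4: behaves like a rotate by 2
	fmt.Printf("%#x\n", notARotate(1, 66)) // 0x0: both shifts overflow to zero
}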