From 68da265c8e6b32753fb5788716953bac16b374c0 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Wed, 29 Mar 2017 18:06:04 +0000 Subject: [PATCH] Revert "cmd/compile: automatically handle commuting ops in rewrite rules" This reverts commit 041ecb697f0e867a2bb0bf219cc2fd5f77057c2e. Reason for revert: Not working on S390x and some 386 archs. I have a guess why the S390x is failing. No clue on the 386 yet. Revert until I can figure it out. Change-Id: I64f1ce78fa6d1037ebe7ee2a8a8107cb4c1db70c Reviewed-on: https://go-review.googlesource.com/38790 Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/gen/386.rules | 90 +- src/cmd/compile/internal/ssa/gen/386Ops.go | 34 +- src/cmd/compile/internal/ssa/gen/AMD64.rules | 816 +- src/cmd/compile/internal/ssa/gen/AMD64Ops.go | 102 +- src/cmd/compile/internal/ssa/gen/ARM.rules | 56 +- src/cmd/compile/internal/ssa/gen/ARM64.rules | 188 +- src/cmd/compile/internal/ssa/gen/MIPS.rules | 12 +- src/cmd/compile/internal/ssa/gen/MIPS64.rules | 6 + src/cmd/compile/internal/ssa/gen/PPC64.rules | 8 +- src/cmd/compile/internal/ssa/gen/PPC64Ops.go | 2 +- src/cmd/compile/internal/ssa/gen/S390X.rules | 744 +- src/cmd/compile/internal/ssa/gen/S390XOps.go | 60 +- .../compile/internal/ssa/gen/generic.rules | 109 +- .../compile/internal/ssa/gen/genericOps.go | 29 +- src/cmd/compile/internal/ssa/gen/rulegen.go | 190 +- src/cmd/compile/internal/ssa/opGen.go | 388 +- src/cmd/compile/internal/ssa/rewrite386.go | 10069 +- src/cmd/compile/internal/ssa/rewriteAMD64.go | 81406 ++-------------- src/cmd/compile/internal/ssa/rewriteARM.go | 800 +- src/cmd/compile/internal/ssa/rewriteARM64.go | 2535 +- src/cmd/compile/internal/ssa/rewriteMIPS.go | 579 +- src/cmd/compile/internal/ssa/rewriteMIPS64.go | 358 +- src/cmd/compile/internal/ssa/rewritePPC64.go | 396 +- src/cmd/compile/internal/ssa/rewriteS390X.go | 26146 +---- src/cmd/compile/internal/ssa/rewritedec.go | 6 +- .../compile/internal/ssa/rewritegeneric.go | 9910 +- 26 files changed, 19042 insertions(+), 115997 deletions(-) diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules index c67796ea09..13d9bb935f 100644 --- a/src/cmd/compile/internal/ssa/gen/386.rules +++ b/src/cmd/compile/internal/ssa/gen/386.rules @@ -431,7 +431,9 @@ // fold constants into instructions (ADDL x (MOVLconst [c])) -> (ADDLconst [c] x) +(ADDL (MOVLconst [c]) x) -> (ADDLconst [c] x) (ADDLcarry x (MOVLconst [c])) -> (ADDLconstcarry [c] x) +(ADDLcarry (MOVLconst [c]) x) -> (ADDLconstcarry [c] x) (ADCL x (MOVLconst [c]) f) -> (ADCLconst [c] x f) (ADCL (MOVLconst [c]) x f) -> (ADCLconst [c] x f) @@ -441,8 +443,10 @@ (SBBL x (MOVLconst [c]) f) -> (SBBLconst [c] x f) (MULL x (MOVLconst [c])) -> (MULLconst [c] x) +(MULL (MOVLconst [c]) x) -> (MULLconst [c] x) (ANDL x (MOVLconst [c])) -> (ANDLconst [c] x) +(ANDL (MOVLconst [c]) x) -> (ANDLconst [c] x) (ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x) @@ -451,8 +455,10 @@ (MULLconst [c] (MULLconst [d] x)) -> (MULLconst [int64(int32(c * d))] x) (ORL x (MOVLconst [c])) -> (ORLconst [c] x) +(ORL (MOVLconst [c]) x) -> (ORLconst [c] x) (XORL x (MOVLconst [c])) -> (XORLconst [c] x) +(XORL (MOVLconst [c]) x) -> (XORLconst [c] x) (SHLL x (MOVLconst [c])) -> (SHLLconst [c&31] x) (SHRL x (MOVLconst [c])) -> (SHRLconst [c&31] x) @@ -473,17 +479,26 @@ // Rotate instructions -(ADDL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c -> (ROLLconst [c] x) -( ORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c -> (ROLLconst [c] x) -(XORL (SHLLconst [c] x) (SHRLconst [d] 
x)) && d == 32-c -> (ROLLconst [c] x) - -(ADDL (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == 16-c && t.Size() == 2 -> (ROLWconst x [c]) -( ORL (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == 16-c && t.Size() == 2 -> (ROLWconst x [c]) -(XORL (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == 16-c && t.Size() == 2 -> (ROLWconst x [c]) - -(ADDL (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == 8-c && t.Size() == 1 -> (ROLBconst x [c]) -( ORL (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == 8-c && t.Size() == 1 -> (ROLBconst x [c]) -(XORL (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == 8-c && t.Size() == 1 -> (ROLBconst x [c]) +(ADDL (SHLLconst [c] x) (SHRLconst [32-c] x)) -> (ROLLconst [c ] x) +( ORL (SHLLconst [c] x) (SHRLconst [32-c] x)) -> (ROLLconst [c ] x) +(XORL (SHLLconst [c] x) (SHRLconst [32-c] x)) -> (ROLLconst [c ] x) +(ADDL (SHRLconst [c] x) (SHLLconst [32-c] x)) -> (ROLLconst [32-c] x) +( ORL (SHRLconst [c] x) (SHLLconst [32-c] x)) -> (ROLLconst [32-c] x) +(XORL (SHRLconst [c] x) (SHLLconst [32-c] x)) -> (ROLLconst [32-c] x) + +(ADDL (SHLLconst x [c]) (SHRWconst x [16-c])) && c < 16 && t.Size() == 2 -> (ROLWconst x [ c]) +( ORL (SHLLconst x [c]) (SHRWconst x [16-c])) && c < 16 && t.Size() == 2 -> (ROLWconst x [ c]) +(XORL (SHLLconst x [c]) (SHRWconst x [16-c])) && c < 16 && t.Size() == 2 -> (ROLWconst x [ c]) +(ADDL (SHRWconst x [c]) (SHLLconst x [16-c])) && c > 0 && t.Size() == 2 -> (ROLWconst x [16-c]) +( ORL (SHRWconst x [c]) (SHLLconst x [16-c])) && c > 0 && t.Size() == 2 -> (ROLWconst x [16-c]) +(XORL (SHRWconst x [c]) (SHLLconst x [16-c])) && c > 0 && t.Size() == 2 -> (ROLWconst x [16-c]) + +(ADDL (SHLLconst x [c]) (SHRBconst x [ 8-c])) && c < 8 && t.Size() == 1 -> (ROLBconst x [ c]) +( ORL (SHLLconst x [c]) (SHRBconst x [ 8-c])) && c < 8 && t.Size() == 1 -> (ROLBconst x [ c]) +(XORL (SHLLconst x [c]) (SHRBconst x [ 8-c])) && c < 8 && t.Size() == 1 -> (ROLBconst x [ c]) +(ADDL (SHRBconst x [c]) (SHLLconst x [ 8-c])) && c > 0 && t.Size() == 1 -> (ROLBconst x [ 8-c]) +( ORL (SHRBconst x [c]) (SHLLconst x [ 8-c])) && c > 0 && t.Size() == 1 -> (ROLBconst x [ 8-c]) +(XORL (SHRBconst x [c]) (SHLLconst x [ 8-c])) && c > 0 && t.Size() == 1 -> (ROLBconst x [ 8-c]) (ROLLconst [c] (ROLLconst [d] x)) -> (ROLLconst [(c+d)&31] x) (ROLWconst [c] (ROLWconst [d] x)) -> (ROLWconst [(c+d)&15] x) @@ -544,9 +559,9 @@ (MULLconst [c] x) && isPowerOfTwo(c-2) && c >= 34 -> (LEAL2 (SHLLconst [log2(c-2)] x) x) (MULLconst [c] x) && isPowerOfTwo(c-4) && c >= 68 -> (LEAL4 (SHLLconst [log2(c-4)] x) x) (MULLconst [c] x) && isPowerOfTwo(c-8) && c >= 136 -> (LEAL8 (SHLLconst [log2(c-8)] x) x) -(MULLconst [c] x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SHLLconst [log2(c/3)] (LEAL2 x x)) -(MULLconst [c] x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SHLLconst [log2(c/5)] (LEAL4 x x)) -(MULLconst [c] x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SHLLconst [log2(c/9)] (LEAL8 x x)) +(MULLconst [c] x) && c%3 == 0 && isPowerOfTwo(c/3)-> (SHLLconst [log2(c/3)] (LEAL2 x x)) +(MULLconst [c] x) && c%5 == 0 && isPowerOfTwo(c/5)-> (SHLLconst [log2(c/5)] (LEAL4 x x)) +(MULLconst [c] x) && c%9 == 0 && isPowerOfTwo(c/9)-> (SHLLconst [log2(c/9)] (LEAL8 x x)) // combine add/shift into LEAL (ADDL x (SHLLconst [3] y)) -> (LEAL8 x y) @@ -554,16 +569,19 @@ (ADDL x (SHLLconst [1] y)) -> (LEAL2 x y) (ADDL x (ADDL y y)) -> (LEAL2 x y) (ADDL x (ADDL x y)) -> (LEAL2 y x) +(ADDL x (ADDL y x)) -> (LEAL2 y x) // combine ADDL/ADDLconst into LEAL1 (ADDLconst [c] (ADDL x y)) -> (LEAL1 [c] x y) (ADDL (ADDLconst [c] x) y) 
-> (LEAL1 [c] x y) +(ADDL x (ADDLconst [c] y)) -> (LEAL1 [c] x y) // fold ADDL into LEAL (ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x) (LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x) (LEAL [c] {s} (ADDL x y)) && x.Op != OpSB && y.Op != OpSB -> (LEAL1 [c] {s} x y) (ADDL x (LEAL [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAL1 [c] {s} x y) +(ADDL (LEAL [c] {s} x) y) && x.Op != OpSB && y.Op != OpSB -> (LEAL1 [c] {s} x y) // fold ADDLconst into LEALx (ADDLconst [c] (LEAL1 [d] {s} x y)) && is32Bit(c+d) -> (LEAL1 [c+d] {s} x y) @@ -571,6 +589,7 @@ (ADDLconst [c] (LEAL4 [d] {s} x y)) && is32Bit(c+d) -> (LEAL4 [c+d] {s} x y) (ADDLconst [c] (LEAL8 [d] {s} x y)) && is32Bit(c+d) -> (LEAL8 [c+d] {s} x y) (LEAL1 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAL1 [c+d] {s} x y) +(LEAL1 [c] {s} x (ADDLconst [d] y)) && is32Bit(c+d) && y.Op != OpSB -> (LEAL1 [c+d] {s} x y) (LEAL2 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAL2 [c+d] {s} x y) (LEAL2 [c] {s} x (ADDLconst [d] y)) && is32Bit(c+2*d) && y.Op != OpSB -> (LEAL2 [c+2*d] {s} x y) (LEAL4 [c] {s} (ADDLconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAL4 [c+d] {s} x y) @@ -580,8 +599,12 @@ // fold shifts into LEALx (LEAL1 [c] {s} x (SHLLconst [1] y)) -> (LEAL2 [c] {s} x y) +(LEAL1 [c] {s} (SHLLconst [1] x) y) -> (LEAL2 [c] {s} y x) (LEAL1 [c] {s} x (SHLLconst [2] y)) -> (LEAL4 [c] {s} x y) +(LEAL1 [c] {s} (SHLLconst [2] x) y) -> (LEAL4 [c] {s} y x) (LEAL1 [c] {s} x (SHLLconst [3] y)) -> (LEAL8 [c] {s} x y) +(LEAL1 [c] {s} (SHLLconst [3] x) y) -> (LEAL8 [c] {s} y x) + (LEAL2 [c] {s} x (SHLLconst [1] y)) -> (LEAL4 [c] {s} x y) (LEAL2 [c] {s} x (SHLLconst [2] y)) -> (LEAL8 [c] {s} x y) (LEAL4 [c] {s} x (SHLLconst [1] y)) -> (LEAL8 [c] {s} x y) @@ -865,6 +888,8 @@ // LEAL into LEAL1 (LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB -> (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y) +(LEAL1 [off1] {sym1} x (LEAL [off2] {sym2} y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB -> + (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y) // LEAL1 into LEAL (LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> @@ -1103,12 +1128,14 @@ (CMPWconst x [0]) -> (TESTW x x) (CMPBconst x [0]) -> (TESTB x x) +// Move shifts to second argument of ORs. Helps load combining rules below. +(ORL x:(SHLLconst _) y) && y.Op != Op386SHLLconst -> (ORL y x) + // Combining byte loads into larger (unaligned) loads. // There are many ways these combinations could occur. This is // designed to match the way encoding/binary.LittleEndian does it. 
-(ORL x0:(MOVBload [i0] {s} p mem) - s0:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem))) - && i1 == i0+1 +(ORL x0:(MOVBload [i] {s} p mem) + s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 @@ -1116,14 +1143,12 @@ && clobber(x0) && clobber(x1) && clobber(s0) - -> @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) + -> @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) (ORL o0:(ORL - x0:(MOVWload [i0] {s} p mem) - s0:(SHLLconst [16] x1:(MOVBload [i2] {s} p mem))) - s1:(SHLLconst [24] x2:(MOVBload [i3] {s} p mem))) - && i2 == i0+2 - && i3 == i0+3 + x0:(MOVWload [i] {s} p mem) + s0:(SHLLconst [16] x1:(MOVBload [i+2] {s} p mem))) + s1:(SHLLconst [24] x2:(MOVBload [i+3] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 @@ -1137,11 +1162,10 @@ && clobber(s0) && clobber(s1) && clobber(o0) - -> @mergePoint(b,x0,x1,x2) (MOVLload [i0] {s} p mem) + -> @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p mem) -(ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) - s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - && i1==i0+1 +(ORL x0:(MOVBloadidx1 [i] {s} p idx mem) + s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 @@ -1149,14 +1173,12 @@ && clobber(x0) && clobber(x1) && clobber(s0) - -> @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) + -> @mergePoint(b,x0,x1) (MOVWloadidx1 [i] {s} p idx mem) (ORL o0:(ORL - x0:(MOVWloadidx1 [i0] {s} p idx mem) - s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem))) - s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) - && i2 == i0+2 - && i3 == i0+3 + x0:(MOVWloadidx1 [i] {s} p idx mem) + s0:(SHLLconst [16] x1:(MOVBloadidx1 [i+2] {s} p idx mem))) + s1:(SHLLconst [24] x2:(MOVBloadidx1 [i+3] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 @@ -1170,7 +1192,7 @@ && clobber(s0) && clobber(s1) && clobber(o0) - -> @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) + -> @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i] {s} p idx mem) // Combine constant stores into larger (unaligned) stores. 
(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem)) diff --git a/src/cmd/compile/internal/ssa/gen/386Ops.go b/src/cmd/compile/internal/ssa/gen/386Ops.go index b287775194..2a638ff1ed 100644 --- a/src/cmd/compile/internal/ssa/gen/386Ops.go +++ b/src/cmd/compile/internal/ssa/gen/386Ops.go @@ -193,10 +193,10 @@ func init() { {name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1 {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMULL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint - {name: "HMULL", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width - {name: "HMULLU", argLength: 2, reg: gp21hmul, commutative: true, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width + {name: "HMULL", argLength: 2, reg: gp21hmul, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width + {name: "HMULLU", argLength: 2, reg: gp21hmul, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width - {name: "MULLQU", argLength: 2, reg: gp21mul, commutative: true, asm: "MULL", clobberFlags: true}, // arg0 * arg1, high 32 in result[0], low 32 in result[1] + {name: "MULLQU", argLength: 2, reg: gp21mul, asm: "MULL", clobberFlags: true}, // arg0 * arg1, high 32 in result[0], low 32 in result[1] {name: "AVGLU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 + arg1) / 2 as unsigned, all 32 result bits @@ -229,9 +229,9 @@ func init() { {name: "UCOMISS", argLength: 2, reg: fp2flags, asm: "UCOMISS", typ: "Flags", usesScratch: true}, // arg0 compare to arg1, f32 {name: "UCOMISD", argLength: 2, reg: fp2flags, asm: "UCOMISD", typ: "Flags", usesScratch: true}, // arg0 compare to arg1, f64 - {name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0 - {name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0 - {name: "TESTB", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0 + {name: "TESTL", argLength: 2, reg: gp2flags, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0 + {name: "TESTW", argLength: 2, reg: gp2flags, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0 + {name: "TESTB", argLength: 2, reg: gp2flags, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0 {name: "TESTLconst", argLength: 1, reg: gp1flags, asm: "TESTL", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0 {name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"}, // (arg0 & auxint) compare to 0 {name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"}, // (arg0 & auxint) compare to 0 @@ -314,7 +314,7 @@ func init() { {name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs for float negation. 
{name: "LEAL", argLength: 1, reg: gp11sb, aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux - {name: "LEAL1", argLength: 2, reg: gp21sb, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux + {name: "LEAL1", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux {name: "LEAL2", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux {name: "LEAL4", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux {name: "LEAL8", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux @@ -331,17 +331,17 @@ func init() { {name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem // indexed loads/stores - {name: "MOVBloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBLZX", aux: "SymOff", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem - {name: "MOVWloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem - {name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+2*arg1+auxint+aux. arg2=mem - {name: "MOVLloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem - {name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem + {name: "MOVBloadidx1", argLength: 3, reg: gploadidx, asm: "MOVBLZX", aux: "SymOff", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVWloadidx1", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+2*arg1+auxint+aux. arg2=mem + {name: "MOVLloadidx1", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem // TODO: sign-extending indexed loads - {name: "MOVBstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVB", aux: "SymOff", symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem - {name: "MOVWstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem - {name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem - {name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem - {name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. 
arg3=mem + {name: "MOVBstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVB", aux: "SymOff", symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVWstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem + {name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem // TODO: add size-mismatched indexed loads, like MOVBstoreidx4. // For storeconst ops, the AuxInt field encodes both diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index ac45cd71e5..2e3e6c01ba 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -515,6 +515,10 @@ (NE (TESTB (SETA cmp) (SETA cmp)) yes no) -> (UGT cmp yes no) (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) -> (UGE cmp yes no) +// Normalize TESTx argument order for BTx rewrites below. +(TESTQ y x:(SHLQ _ _)) && y.Op != OpAMD64SHLQ -> (TESTQ x y) +(TESTL y x:(SHLL _ _)) && y.Op != OpAMD64SHLL -> (TESTL x y) + // Recognize bit tests: a&(1< (ADDQconst [c] x) +(ADDQ (MOVQconst [c]) x) && is32Bit(c) -> (ADDQconst [c] x) (ADDL x (MOVLconst [c])) -> (ADDLconst [c] x) +(ADDL (MOVLconst [c]) x) -> (ADDLconst [c] x) (SUBQ x (MOVQconst [c])) && is32Bit(c) -> (SUBQconst x [c]) (SUBQ (MOVQconst [c]) x) && is32Bit(c) -> (NEGQ (SUBQconst x [c])) @@ -562,10 +568,14 @@ (SUBL (MOVLconst [c]) x) -> (NEGL (SUBLconst x [c])) (MULQ x (MOVQconst [c])) && is32Bit(c) -> (MULQconst [c] x) +(MULQ (MOVQconst [c]) x) && is32Bit(c) -> (MULQconst [c] x) (MULL x (MOVLconst [c])) -> (MULLconst [c] x) +(MULL (MOVLconst [c]) x) -> (MULLconst [c] x) (ANDQ x (MOVQconst [c])) && is32Bit(c) -> (ANDQconst [c] x) +(ANDQ (MOVQconst [c]) x) && is32Bit(c) -> (ANDQconst [c] x) (ANDL x (MOVLconst [c])) -> (ANDLconst [c] x) +(ANDL (MOVLconst [c]) x) -> (ANDLconst [c] x) (ANDLconst [c] (ANDLconst [d] x)) -> (ANDLconst [c & d] x) (ANDQconst [c] (ANDQconst [d] x)) -> (ANDQconst [c & d] x) @@ -577,10 +587,14 @@ (MULQconst [c] (MULQconst [d] x)) && is32Bit(c*d) -> (MULQconst [c * d] x) (ORQ x (MOVQconst [c])) && is32Bit(c) -> (ORQconst [c] x) +(ORQ (MOVQconst [c]) x) && is32Bit(c) -> (ORQconst [c] x) (ORL x (MOVLconst [c])) -> (ORLconst [c] x) +(ORL (MOVLconst [c]) x) -> (ORLconst [c] x) (XORQ x (MOVQconst [c])) && is32Bit(c) -> (XORQconst [c] x) +(XORQ (MOVQconst [c]) x) && is32Bit(c) -> (XORQconst [c] x) (XORL x (MOVLconst [c])) -> (XORLconst [c] x) +(XORL (MOVLconst [c]) x) -> (XORLconst [c] x) (SHLQ x (MOVQconst [c])) -> (SHLQconst [c&63] x) (SHLQ x (MOVLconst [c])) -> (SHLQconst [c&63] x) @@ -629,21 +643,33 @@ // Rotate instructions -(ADDQ (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c -> (ROLQconst x [c]) -( ORQ (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c -> (ROLQconst x [c]) -(XORQ (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c -> (ROLQconst x [c]) - -(ADDL (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c -> (ROLLconst x [c]) -( ORL (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c -> (ROLLconst x [c]) -(XORL (SHLLconst x [c]) (SHRLconst 
x [d])) && d==32-c -> (ROLLconst x [c]) - -(ADDL (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 -> (ROLWconst x [c]) -( ORL (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 -> (ROLWconst x [c]) -(XORL (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 -> (ROLWconst x [c]) - -(ADDL (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.Size() == 1 -> (ROLBconst x [c]) -( ORL (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.Size() == 1 -> (ROLBconst x [c]) -(XORL (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.Size() == 1 -> (ROLBconst x [c]) +(ADDQ (SHLQconst x [c]) (SHRQconst x [64-c])) -> (ROLQconst x [ c]) +( ORQ (SHLQconst x [c]) (SHRQconst x [64-c])) -> (ROLQconst x [ c]) +(XORQ (SHLQconst x [c]) (SHRQconst x [64-c])) -> (ROLQconst x [ c]) +(ADDQ (SHRQconst x [c]) (SHLQconst x [64-c])) -> (ROLQconst x [64-c]) +( ORQ (SHRQconst x [c]) (SHLQconst x [64-c])) -> (ROLQconst x [64-c]) +(XORQ (SHRQconst x [c]) (SHLQconst x [64-c])) -> (ROLQconst x [64-c]) + +(ADDL (SHLLconst x [c]) (SHRLconst x [32-c])) -> (ROLLconst x [ c]) +( ORL (SHLLconst x [c]) (SHRLconst x [32-c])) -> (ROLLconst x [ c]) +(XORL (SHLLconst x [c]) (SHRLconst x [32-c])) -> (ROLLconst x [ c]) +(ADDL (SHRLconst x [c]) (SHLLconst x [32-c])) -> (ROLLconst x [32-c]) +( ORL (SHRLconst x [c]) (SHLLconst x [32-c])) -> (ROLLconst x [32-c]) +(XORL (SHRLconst x [c]) (SHLLconst x [32-c])) -> (ROLLconst x [32-c]) + +(ADDL (SHLLconst x [c]) (SHRWconst x [16-c])) && c < 16 && t.Size() == 2 -> (ROLWconst x [ c]) +( ORL (SHLLconst x [c]) (SHRWconst x [16-c])) && c < 16 && t.Size() == 2 -> (ROLWconst x [ c]) +(XORL (SHLLconst x [c]) (SHRWconst x [16-c])) && c < 16 && t.Size() == 2 -> (ROLWconst x [ c]) +(ADDL (SHRWconst x [c]) (SHLLconst x [16-c])) && c > 0 && t.Size() == 2 -> (ROLWconst x [16-c]) +( ORL (SHRWconst x [c]) (SHLLconst x [16-c])) && c > 0 && t.Size() == 2 -> (ROLWconst x [16-c]) +(XORL (SHRWconst x [c]) (SHLLconst x [16-c])) && c > 0 && t.Size() == 2 -> (ROLWconst x [16-c]) + +(ADDL (SHLLconst x [c]) (SHRBconst x [ 8-c])) && c < 8 && t.Size() == 1 -> (ROLBconst x [ c]) +( ORL (SHLLconst x [c]) (SHRBconst x [ 8-c])) && c < 8 && t.Size() == 1 -> (ROLBconst x [ c]) +(XORL (SHLLconst x [c]) (SHRBconst x [ 8-c])) && c < 8 && t.Size() == 1 -> (ROLBconst x [ c]) +(ADDL (SHRBconst x [c]) (SHLLconst x [ 8-c])) && c > 0 && t.Size() == 1 -> (ROLBconst x [ 8-c]) +( ORL (SHRBconst x [c]) (SHLLconst x [ 8-c])) && c > 0 && t.Size() == 1 -> (ROLBconst x [ 8-c]) +(XORL (SHRBconst x [c]) (SHLLconst x [ 8-c])) && c > 0 && t.Size() == 1 -> (ROLBconst x [ 8-c]) (ROLQconst [c] (ROLQconst [d] x)) -> (ROLQconst [(c+d)&63] x) (ROLLconst [c] (ROLLconst [d] x)) -> (ROLLconst [(c+d)&31] x) @@ -721,9 +747,9 @@ (MULQconst [c] x) && isPowerOfTwo(c-2) && c >= 34 -> (LEAQ2 (SHLQconst [log2(c-2)] x) x) (MULQconst [c] x) && isPowerOfTwo(c-4) && c >= 68 -> (LEAQ4 (SHLQconst [log2(c-4)] x) x) (MULQconst [c] x) && isPowerOfTwo(c-8) && c >= 136 -> (LEAQ8 (SHLQconst [log2(c-8)] x) x) -(MULQconst [c] x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SHLQconst [log2(c/3)] (LEAQ2 x x)) -(MULQconst [c] x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SHLQconst [log2(c/5)] (LEAQ4 x x)) -(MULQconst [c] x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SHLQconst [log2(c/9)] (LEAQ8 x x)) +(MULQconst [c] x) && c%3 == 0 && isPowerOfTwo(c/3)-> (SHLQconst [log2(c/3)] (LEAQ2 x x)) +(MULQconst [c] x) && c%5 == 0 && isPowerOfTwo(c/5)-> (SHLQconst [log2(c/5)] (LEAQ4 x x)) +(MULQconst [c] x) && c%9 == 0 
&& isPowerOfTwo(c/9)-> (SHLQconst [log2(c/9)] (LEAQ8 x x)) // combine add/shift into LEAQ (ADDQ x (SHLQconst [3] y)) -> (LEAQ8 x y) @@ -731,16 +757,19 @@ (ADDQ x (SHLQconst [1] y)) -> (LEAQ2 x y) (ADDQ x (ADDQ y y)) -> (LEAQ2 x y) (ADDQ x (ADDQ x y)) -> (LEAQ2 y x) +(ADDQ x (ADDQ y x)) -> (LEAQ2 y x) // combine ADDQ/ADDQconst into LEAQ1 (ADDQconst [c] (ADDQ x y)) -> (LEAQ1 [c] x y) (ADDQ (ADDQconst [c] x) y) -> (LEAQ1 [c] x y) +(ADDQ x (ADDQconst [c] y)) -> (LEAQ1 [c] x y) // fold ADDQ into LEAQ (ADDQconst [c] (LEAQ [d] {s} x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x) (LEAQ [c] {s} (ADDQconst [d] x)) && is32Bit(c+d) -> (LEAQ [c+d] {s} x) (LEAQ [c] {s} (ADDQ x y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y) (ADDQ x (LEAQ [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y) +(ADDQ (LEAQ [c] {s} x) y) && x.Op != OpSB && y.Op != OpSB -> (LEAQ1 [c] {s} x y) // fold ADDQconst into LEAQx (ADDQconst [c] (LEAQ1 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ1 [c+d] {s} x y) @@ -748,6 +777,7 @@ (ADDQconst [c] (LEAQ4 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ4 [c+d] {s} x y) (ADDQconst [c] (LEAQ8 [d] {s} x y)) && is32Bit(c+d) -> (LEAQ8 [c+d] {s} x y) (LEAQ1 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ1 [c+d] {s} x y) +(LEAQ1 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+d) && y.Op != OpSB -> (LEAQ1 [c+d] {s} x y) (LEAQ2 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ2 [c+d] {s} x y) (LEAQ2 [c] {s} x (ADDQconst [d] y)) && is32Bit(c+2*d) && y.Op != OpSB -> (LEAQ2 [c+2*d] {s} x y) (LEAQ4 [c] {s} (ADDQconst [d] x) y) && is32Bit(c+d) && x.Op != OpSB -> (LEAQ4 [c+d] {s} x y) @@ -757,8 +787,12 @@ // fold shifts into LEAQx (LEAQ1 [c] {s} x (SHLQconst [1] y)) -> (LEAQ2 [c] {s} x y) +(LEAQ1 [c] {s} (SHLQconst [1] x) y) -> (LEAQ2 [c] {s} y x) (LEAQ1 [c] {s} x (SHLQconst [2] y)) -> (LEAQ4 [c] {s} x y) +(LEAQ1 [c] {s} (SHLQconst [2] x) y) -> (LEAQ4 [c] {s} y x) (LEAQ1 [c] {s} x (SHLQconst [3] y)) -> (LEAQ8 [c] {s} x y) +(LEAQ1 [c] {s} (SHLQconst [3] x) y) -> (LEAQ8 [c] {s} y x) + (LEAQ2 [c] {s} x (SHLQconst [1] y)) -> (LEAQ4 [c] {s} x y) (LEAQ2 [c] {s} x (SHLQconst [2] y)) -> (LEAQ8 [c] {s} x y) (LEAQ4 [c] {s} x (SHLQconst [1] y)) -> (LEAQ8 [c] {s} x y) @@ -1100,6 +1134,8 @@ // LEAQ into LEAQ1 (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB -> (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) +(LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB -> + (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) // LEAQ1 into LEAQ (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> @@ -1381,6 +1417,10 @@ (TESTL (MOVLconst [c]) x) -> (TESTLconst [c] x) (TESTW (MOVLconst [c]) x) -> (TESTWconst [c] x) (TESTB (MOVLconst [c]) x) -> (TESTBconst [c] x) +(TESTQ x (MOVQconst [c])) && is32Bit(c) -> (TESTQconst [c] x) +(TESTL x (MOVLconst [c])) -> (TESTLconst [c] x) +(TESTW x (MOVLconst [c])) -> (TESTWconst [c] x) +(TESTB x (MOVLconst [c])) -> (TESTBconst [c] x) // TEST %reg,%reg is shorter than CMP (CMPQconst x [0]) -> (TESTQ x x) @@ -1388,551 +1428,355 @@ (CMPWconst x [0]) -> (TESTW x x) (CMPBconst x [0]) -> (TESTB x x) +// Move shifts to second argument of ORs. Helps load combining rules below. +(ORQ x:(SHLQconst _) y) && y.Op != OpAMD64SHLQconst -> (ORQ y x) +(ORL x:(SHLLconst _) y) && y.Op != OpAMD64SHLLconst -> (ORL y x) + // Combining byte loads into larger (unaligned) loads. 
// There are many ways these combinations could occur. This is // designed to match the way encoding/binary.LittleEndian does it. - -// Little-endian loads - -(ORL x0:(MOVBload [i0] {s} p mem) - sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem))) - && i1 == i0+1 - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) - -(ORQ x0:(MOVBload [i0] {s} p mem) - sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem))) - && i1 == i0+1 - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) - -(ORL x0:(MOVWload [i0] {s} p mem) - sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem))) - && i1 == i0+2 +(ORL x0:(MOVBload [i] {s} p mem) + s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) - -(ORQ x0:(MOVWload [i0] {s} p mem) - sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem))) - && i1 == i0+2 - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) - -(ORQ x0:(MOVLload [i0] {s} p mem) - sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem))) - && i1 == i0+4 - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 + && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) + && clobber(s0) + -> @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) -(ORL - s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) - or:(ORL - s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) - y)) - && i1 == i0+1 - && j1 == j0+8 - && j0 % 16 == 0 +(ORL o0:(ORL + x0:(MOVWload [i] {s} p mem) + s0:(SHLLconst [16] x1:(MOVBload [i+2] {s} p mem))) + s1:(SHLLconst [24] x2:(MOVBload [i+3] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 + && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1) != nil + && o0.Uses == 1 + && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) + && clobber(x2) && clobber(s0) && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWload [i0] {s} p mem)) y) - -(ORQ - s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) - or:(ORQ - s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) - y)) - && i1 == i0+1 - && j1 == j0+8 - && j0 % 16 == 0 + && clobber(o0) + -> @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p mem) + +(ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ + x0:(MOVBload [i] {s} p mem) + s0:(SHLQconst [8] x1:(MOVBload [i+1] {s} p mem))) + s1:(SHLQconst [16] x2:(MOVBload [i+2] {s} p mem))) + s2:(SHLQconst [24] x3:(MOVBload [i+3] {s} p mem))) + s3:(SHLQconst [32] x4:(MOVBload [i+4] {s} p mem))) + s4:(SHLQconst [40] x5:(MOVBload [i+5] {s} p mem))) + s5:(SHLQconst [48] x6:(MOVBload [i+6] {s} p mem))) + s6:(SHLQconst [56] x7:(MOVBload [i+7] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && x4.Uses == 1 + && x5.Uses == 1 + && x6.Uses == 1 + && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1) != nil + && s2.Uses == 1 + && s3.Uses == 1 + && s4.Uses == 1 + && s5.Uses == 1 + && s6.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && o2.Uses == 1 + && o3.Uses == 1 + 
&& o4.Uses == 1 + && o5.Uses == 1 + && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) + && clobber(x2) + && clobber(x3) + && clobber(x4) + && clobber(x5) + && clobber(x6) + && clobber(x7) && clobber(s0) && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWload [i0] {s} p mem)) y) - -(ORQ - s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) - or:(ORQ - s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) - y)) - && i1 == i0+2 - && j1 == j0+16 - && j0 % 32 == 0 + && clobber(s2) + && clobber(s3) + && clobber(s4) + && clobber(s5) + && clobber(s6) + && clobber(o0) + && clobber(o1) + && clobber(o2) + && clobber(o3) + && clobber(o4) + && clobber(o5) + -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem) + +(ORL x0:(MOVBloadidx1 [i] {s} p idx mem) + s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 - && s1.Uses == 1 - && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLload [i0] {s} p mem)) y) - -// Little-endian indexed loads - -(ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) - sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - && i1 == i0+1 - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - -(ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) - sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - && i1 == i0+1 - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - -(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) - sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - && i1 == i0+2 - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - -(ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) - sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - && i1 == i0+2 - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) + -> @mergePoint(b,x0,x1) (MOVWloadidx1 [i] {s} p idx mem) -(ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) - sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem))) - && i1 == i0+4 - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) - -(ORL - s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) - or:(ORL - s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) - y)) - && i1 == i0+1 - && j1 == j0+8 - && j0 % 16 == 0 +(ORL o0:(ORL + x0:(MOVWloadidx1 [i] {s} p idx mem) + s0:(SHLLconst [16] x1:(MOVBloadidx1 [i+2] {s} p idx mem))) + s1:(SHLLconst [24] x2:(MOVBloadidx1 [i+3] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 + && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1) != nil + && o0.Uses == 1 + && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) + && clobber(x2) && clobber(s0) && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) 
y) - -(ORQ - s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) - or:(ORQ - s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) - y)) - && i1 == i0+1 - && j1 == j0+8 - && j0 % 16 == 0 + && clobber(o0) + -> @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i] {s} p idx mem) + +(ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ + x0:(MOVBloadidx1 [i] {s} p idx mem) + s0:(SHLQconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) + s1:(SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) + s2:(SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) + s3:(SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem))) + s4:(SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem))) + s5:(SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem))) + s6:(SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && x4.Uses == 1 + && x5.Uses == 1 + && x6.Uses == 1 + && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1) != nil + && s2.Uses == 1 + && s3.Uses == 1 + && s4.Uses == 1 + && s5.Uses == 1 + && s6.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && o2.Uses == 1 + && o3.Uses == 1 + && o4.Uses == 1 + && o5.Uses == 1 + && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) + && clobber(x2) + && clobber(x3) + && clobber(x4) + && clobber(x5) + && clobber(x6) + && clobber(x7) && clobber(s0) && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - -(ORQ - s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) - or:(ORQ - s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) - y)) - && i1 == i0+2 - && j1 == j0+16 - && j0 % 32 == 0 + && clobber(s2) + && clobber(s3) + && clobber(s4) + && clobber(s5) + && clobber(s6) + && clobber(o0) + && clobber(o1) + && clobber(o2) + && clobber(o3) + && clobber(o4) + && clobber(o5) + -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 [i] {s} p idx mem) + +// Combine 2 byte loads + shifts into (unaligned) word load + rolw 8 +(ORL + x0:(MOVBload [i] {s} p mem) + s0:(SHLLconst [8] x1:(MOVBload [i-1] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 - && s1.Uses == 1 - && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - -// Big-endian loads - -(ORL - x1:(MOVBload [i1] {s} p mem) - sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem))) - && i1 == i0+1 - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWload [i0] {s} p mem)) - -(ORQ - x1:(MOVBload [i1] {s} p mem) - sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem))) - && i1 == i0+1 - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWload [i0] {s} p mem)) - -(ORL - r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) - sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) - && i1 == i0+2 - && x0.Uses == 1 - && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(r0) - && clobber(r1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (BSWAPL (MOVLload [i0] {s} p mem)) - -(ORQ - r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) - sh:(SHLQconst 
[16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) - && i1 == i0+2 - && x0.Uses == 1 - && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(r0) - && clobber(r1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (BSWAPL (MOVLload [i0] {s} p mem)) - -(ORQ - r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) - sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem)))) - && i1 == i0+4 - && x0.Uses == 1 - && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(r0) - && clobber(r1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (BSWAPQ (MOVQload [i0] {s} p mem)) + -> @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWload [i-1] {s} p mem)) (ORL - s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) - or:(ORL - s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) - y)) - && i1 == i0+1 - && j1 == j0-8 - && j1 % 16 == 0 + x0:(MOVBloadidx1 [i] {s} p idx mem) + s0:(SHLLconst [8] x1:(MOVBloadidx1 [i-1] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 - && s1.Uses == 1 - && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) - -(ORQ - s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) - or:(ORQ - s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) - y)) - && i1 == i0+1 - && j1 == j0-8 - && j1 % 16 == 0 - && x0.Uses == 1 - && x1.Uses == 1 - && s0.Uses == 1 - && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(s0) - && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) - -(ORQ - s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) - or:(ORQ - s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) - y)) - && i1 == i0+2 - && j1 == j0-16 - && j1 % 32 == 0 - && x0.Uses == 1 - && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 - && s0.Uses == 1 + -> @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i-1] {s} p idx mem)) + +// Combine byte loads + shifts into larger (unaligned) loads + bswap +// (for L version first 2 bytes loads are matched as result of above 2-bytes load+shift rewrite) +(ORL o1:(ORL o0:(ROLWconst [8] x01:(MOVWload [i1] {s} p mem)) + s1:(SHLLconst [16] x2:(MOVBload [i1-1] {s} p mem))) + s2:(SHLLconst [24] x3:(MOVBload [i1-2] {s} p mem))) + && x01.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(r0) - && clobber(r1) - && clobber(s0) + && s2.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && mergePoint(b,x01,x2,x3) != nil + && clobber(x01) + && clobber(x2) + && clobber(x3) && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLload [i0] {s} p mem))) y) - -// Big-endian indexed loads - -(ORL - x1:(MOVBloadidx1 [i1] {s} p idx mem) - sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - && i1 == i0+1 - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - -(ORQ - x1:(MOVBloadidx1 [i1] {s} p idx mem) - sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - && i1 == i0+1 - && x0.Uses == 1 - && x1.Uses == 1 - 
&& sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - -(ORL - r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) - sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - && i1 == i0+2 - && x0.Uses == 1 - && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(r0) - && clobber(r1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - -(ORQ - r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) - sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - && i1 == i0+2 - && x0.Uses == 1 - && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(r0) - && clobber(r1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - -(ORQ - r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) - sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem)))) - && i1 == i0+4 - && x0.Uses == 1 - && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(r0) - && clobber(r1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (BSWAPQ (MOVQloadidx1 [i0] {s} p idx mem)) - -(ORL - s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) - or:(ORL - s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) - y)) - && i1 == i0+1 - && j1 == j0-8 - && j1 % 16 == 0 - && x0.Uses == 1 - && x1.Uses == 1 - && s0.Uses == 1 + && clobber(s2) + && clobber(o0) + && clobber(o1) + -> @mergePoint(b,x01,x2,x3) (BSWAPL (MOVLload [i1-2] {s} p mem)) + +(ORL o1:(ORL o0:(ROLWconst [8] x01:(MOVWloadidx1 [i1] {s} p idx mem)) + s1:(SHLLconst [16] x2:(MOVBloadidx1 [i1-1] {s} p idx mem))) + s2:(SHLLconst [24] x3:(MOVBloadidx1 [i1-2] {s} p idx mem))) + && x01.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(s0) + && s2.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && mergePoint(b,x01,x2,x3) != nil + && clobber(x01) + && clobber(x2) + && clobber(x3) && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - -(ORQ - s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) - or:(ORQ - s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) - y)) - && i1 == i0+1 - && j1 == j0-8 - && j1 % 16 == 0 + && clobber(s2) + && clobber(o0) + && clobber(o1) + -> @mergePoint(b,x01,x2,x3) (BSWAPL (MOVLloadidx1 [i1-2] {s} p idx mem)) + +(ORQ o5:(ORQ o4:(ORQ o3:(ORQ o2:(ORQ o1:(ORQ o0:(ORQ + x0:(MOVBload [i] {s} p mem) + s0:(SHLQconst [8] x1:(MOVBload [i-1] {s} p mem))) + s1:(SHLQconst [16] x2:(MOVBload [i-2] {s} p mem))) + s2:(SHLQconst [24] x3:(MOVBload [i-3] {s} p mem))) + s3:(SHLQconst [32] x4:(MOVBload [i-4] {s} p mem))) + s4:(SHLQconst [40] x5:(MOVBload [i-5] {s} p mem))) + s5:(SHLQconst [48] x6:(MOVBload [i-6] {s} p mem))) + s6:(SHLQconst [56] x7:(MOVBload [i-7] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && x4.Uses == 1 + && x5.Uses == 1 + && x6.Uses == 1 + && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1) != nil + && s2.Uses == 1 + && s3.Uses == 1 + && s4.Uses == 1 + && s5.Uses == 1 + && 
s6.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && o2.Uses == 1 + && o3.Uses == 1 + && o4.Uses == 1 + && o5.Uses == 1 + && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) + && clobber(x2) + && clobber(x3) + && clobber(x4) + && clobber(x5) + && clobber(x6) + && clobber(x7) && clobber(s0) && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - -(ORQ - s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) - or:(ORQ - s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - y)) - && i1 == i0+2 - && j1 == j0-16 - && j1 % 32 == 0 + && clobber(s2) + && clobber(s3) + && clobber(s4) + && clobber(s5) + && clobber(s6) + && clobber(o0) + && clobber(o1) + && clobber(o2) + && clobber(o3) + && clobber(o4) + && clobber(o5) + -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (BSWAPQ (MOVQload [i-7] {s} p mem)) + +(ORQ o5:(ORQ o4:(ORQ o3:(ORQ o2:(ORQ o1:(ORQ o0:(ORQ + x0:(MOVBloadidx1 [i] {s} p idx mem) + s0:(SHLQconst [8] x1:(MOVBloadidx1 [i-1] {s} p idx mem))) + s1:(SHLQconst [16] x2:(MOVBloadidx1 [i-2] {s} p idx mem))) + s2:(SHLQconst [24] x3:(MOVBloadidx1 [i-3] {s} p idx mem))) + s3:(SHLQconst [32] x4:(MOVBloadidx1 [i-4] {s} p idx mem))) + s4:(SHLQconst [40] x5:(MOVBloadidx1 [i-5] {s} p idx mem))) + s5:(SHLQconst [48] x6:(MOVBloadidx1 [i-6] {s} p idx mem))) + s6:(SHLQconst [56] x7:(MOVBloadidx1 [i-7] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && x4.Uses == 1 + && x5.Uses == 1 + && x6.Uses == 1 + && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1) != nil + && s2.Uses == 1 + && s3.Uses == 1 + && s4.Uses == 1 + && s5.Uses == 1 + && s6.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && o2.Uses == 1 + && o3.Uses == 1 + && o4.Uses == 1 + && o5.Uses == 1 + && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) - && clobber(r0) - && clobber(r1) + && clobber(x2) + && clobber(x3) + && clobber(x4) + && clobber(x5) + && clobber(x6) + && clobber(x7) && clobber(s0) && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) + && clobber(s2) + && clobber(s3) + && clobber(s4) + && clobber(s5) + && clobber(s6) + && clobber(o0) + && clobber(o1) + && clobber(o2) + && clobber(o3) + && clobber(o4) + && clobber(o5) + -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (BSWAPQ (MOVQloadidx1 [i-7] {s} p idx mem)) // Combine 2 byte stores + shift into rolw 8 + word store (MOVBstore [i] {s} p w @@ -2185,21 +2029,33 @@ // Merge load and op // TODO: add indexed variants? 
(ADDQ x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ADDQmem x [off] {sym} ptr mem) +(ADDQ l:(MOVQload [off] {sym} ptr mem) x) && canMergeLoad(v, l, x) && clobber(l) -> (ADDQmem x [off] {sym} ptr mem) (ADDL x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ADDLmem x [off] {sym} ptr mem) +(ADDL l:(MOVLload [off] {sym} ptr mem) x) && canMergeLoad(v, l, x) && clobber(l) -> (ADDLmem x [off] {sym} ptr mem) (SUBQ x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (SUBQmem x [off] {sym} ptr mem) (SUBL x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (SUBLmem x [off] {sym} ptr mem) (ANDQ x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ANDQmem x [off] {sym} ptr mem) +(ANDQ l:(MOVQload [off] {sym} ptr mem) x) && canMergeLoad(v, l, x) && clobber(l) -> (ANDQmem x [off] {sym} ptr mem) (ANDL x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ANDLmem x [off] {sym} ptr mem) -(ORQ x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ORQmem x [off] {sym} ptr mem) -(ORL x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ORLmem x [off] {sym} ptr mem) +(ANDL l:(MOVLload [off] {sym} ptr mem) x) && canMergeLoad(v, l, x) && clobber(l) -> (ANDLmem x [off] {sym} ptr mem) +(ORQ x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ORQmem x [off] {sym} ptr mem) +(ORQ l:(MOVQload [off] {sym} ptr mem) x) && canMergeLoad(v, l, x) && clobber(l) -> (ORQmem x [off] {sym} ptr mem) +(ORL x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ORLmem x [off] {sym} ptr mem) +(ORL l:(MOVLload [off] {sym} ptr mem) x) && canMergeLoad(v, l, x) && clobber(l) -> (ORLmem x [off] {sym} ptr mem) (XORQ x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (XORQmem x [off] {sym} ptr mem) +(XORQ l:(MOVQload [off] {sym} ptr mem) x) && canMergeLoad(v, l, x) && clobber(l) -> (XORQmem x [off] {sym} ptr mem) (XORL x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (XORLmem x [off] {sym} ptr mem) +(XORL l:(MOVLload [off] {sym} ptr mem) x) && canMergeLoad(v, l, x) && clobber(l) -> (XORLmem x [off] {sym} ptr mem) (ADDSD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ADDSDmem x [off] {sym} ptr mem) +(ADDSD l:(MOVSDload [off] {sym} ptr mem) x) && canMergeLoad(v, l, x) && clobber(l) -> (ADDSDmem x [off] {sym} ptr mem) (ADDSS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (ADDSSmem x [off] {sym} ptr mem) +(ADDSS l:(MOVSSload [off] {sym} ptr mem) x) && canMergeLoad(v, l, x) && clobber(l) -> (ADDSSmem x [off] {sym} ptr mem) (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (SUBSDmem x [off] {sym} ptr mem) (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (SUBSSmem x [off] {sym} ptr mem) (MULSD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (MULSDmem x [off] {sym} ptr mem) +(MULSD l:(MOVSDload [off] {sym} ptr mem) x) && canMergeLoad(v, l, x) && clobber(l) -> (MULSDmem x [off] {sym} ptr mem) (MULSS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoad(v, l, x) && clobber(l) -> (MULSSmem x [off] {sym} ptr mem) +(MULSS l:(MOVSSload [off] {sym} ptr mem) x) && canMergeLoad(v, l, x) && clobber(l) -> (MULSSmem x [off] {sym} ptr mem) // Merge ADDQconst and LEAQ into atomic 
loads. (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) -> diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go index 6f0845305c..f9731047e7 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go @@ -202,10 +202,10 @@ func init() { {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMULQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 * auxint {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMULL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint - {name: "HMULQ", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULQ", clobberFlags: true}, // (arg0 * arg1) >> width - {name: "HMULL", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width - {name: "HMULQU", argLength: 2, reg: gp21hmul, commutative: true, asm: "MULQ", clobberFlags: true}, // (arg0 * arg1) >> width - {name: "HMULLU", argLength: 2, reg: gp21hmul, commutative: true, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width + {name: "HMULQ", argLength: 2, reg: gp21hmul, asm: "IMULQ", clobberFlags: true}, // (arg0 * arg1) >> width + {name: "HMULL", argLength: 2, reg: gp21hmul, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width + {name: "HMULQU", argLength: 2, reg: gp21hmul, asm: "MULQ", clobberFlags: true}, // (arg0 * arg1) >> width + {name: "HMULLU", argLength: 2, reg: gp21hmul, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width {name: "AVGQU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 + arg1) / 2 as unsigned, all 64 result bits @@ -216,8 +216,8 @@ func init() { {name: "DIVLU", argLength: 2, reg: gp11div, typ: "(UInt32,UInt32)", asm: "DIVL", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1] {name: "DIVWU", argLength: 2, reg: gp11div, typ: "(UInt16,UInt16)", asm: "DIVW", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1] - {name: "MULQU2", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}}, commutative: true, asm: "MULQ", clobberFlags: true}, // arg0 * arg1, returns (hi, lo) - {name: "DIVQU2", argLength: 3, reg: regInfo{inputs: []regMask{dx, ax, gpsp}, outputs: []regMask{ax, dx}}, asm: "DIVQ", clobberFlags: true}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r) + {name: "MULQU2", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}}, asm: "MULQ", clobberFlags: true}, // arg0 * arg1, returns (hi, lo) + {name: "DIVQU2", argLength: 3, reg: regInfo{inputs: []regMask{dx, ax, gpsp}, outputs: []regMask{ax, dx}}, asm: "DIVQ", clobberFlags: true}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r) {name: "ANDQ", argLength: 2, reg: gp21, asm: "ANDQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1 {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1 @@ -251,43 +251,43 @@ func init() { {name: "BTLconst", argLength: 1, reg: gp1flags, asm: "BTL", typ: "Flags", aux: "Int8"}, // test whether bit auxint in arg0 is set, 0 <= auxint < 32 {name: "BTQconst", argLength: 1, reg: gp1flags, asm: "BTQ", typ: "Flags", aux: "Int8"}, // test whether bit auxint in arg0 is set, 0 <= auxint < 64 - {name: "TESTQ", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTQ", typ: "Flags"}, // (arg0 & arg1) compare to 0 - {name: "TESTL", argLength: 2, reg: gp2flags, 
commutative: true, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0 - {name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0 - {name: "TESTB", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0 + {name: "TESTQ", argLength: 2, reg: gp2flags, asm: "TESTQ", typ: "Flags"}, // (arg0 & arg1) compare to 0 + {name: "TESTL", argLength: 2, reg: gp2flags, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0 + {name: "TESTW", argLength: 2, reg: gp2flags, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0 + {name: "TESTB", argLength: 2, reg: gp2flags, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0 {name: "TESTQconst", argLength: 1, reg: gp1flags, asm: "TESTQ", typ: "Flags", aux: "Int64"}, // (arg0 & auxint) compare to 0 {name: "TESTLconst", argLength: 1, reg: gp1flags, asm: "TESTL", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0 {name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"}, // (arg0 & auxint) compare to 0 {name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"}, // (arg0 & auxint) compare to 0 - {name: "SHLQ", argLength: 2, reg: gp21shift, asm: "SHLQ", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 64 - {name: "SHLL", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 32 - {name: "SHLQconst", argLength: 1, reg: gp11, asm: "SHLQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-63 - {name: "SHLLconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-31 + {name: "SHLQ", argLength: 2, reg: gp21shift, asm: "SHLQ", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 64 + {name: "SHLL", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 32 + {name: "SHLQconst", argLength: 1, reg: gp11, asm: "SHLQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-63 + {name: "SHLLconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-31 // Note: x86 is weird, the 16 and 8 byte shifts still use all 5 bits of shift amount! 
- {name: "SHRQ", argLength: 2, reg: gp21shift, asm: "SHRQ", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 64 - {name: "SHRL", argLength: 2, reg: gp21shift, asm: "SHRL", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32 - {name: "SHRW", argLength: 2, reg: gp21shift, asm: "SHRW", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32 - {name: "SHRB", argLength: 2, reg: gp21shift, asm: "SHRB", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32 - {name: "SHRQconst", argLength: 1, reg: gp11, asm: "SHRQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-63 - {name: "SHRLconst", argLength: 1, reg: gp11, asm: "SHRL", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-31 - {name: "SHRWconst", argLength: 1, reg: gp11, asm: "SHRW", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-15 - {name: "SHRBconst", argLength: 1, reg: gp11, asm: "SHRB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-7 - - {name: "SARQ", argLength: 2, reg: gp21shift, asm: "SARQ", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64 - {name: "SARL", argLength: 2, reg: gp21shift, asm: "SARL", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32 - {name: "SARW", argLength: 2, reg: gp21shift, asm: "SARW", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32 - {name: "SARB", argLength: 2, reg: gp21shift, asm: "SARB", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32 - {name: "SARQconst", argLength: 1, reg: gp11, asm: "SARQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63 - {name: "SARLconst", argLength: 1, reg: gp11, asm: "SARL", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31 - {name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-15 - {name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-7 - - {name: "ROLQconst", argLength: 1, reg: gp11, asm: "ROLQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-63 - {name: "ROLLconst", argLength: 1, reg: gp11, asm: "ROLL", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-31 - {name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15 - {name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7 + {name: "SHRQ", argLength: 2, reg: gp21shift, asm: "SHRQ", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 64 + {name: "SHRL", argLength: 2, reg: gp21shift, asm: "SHRL", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32 + {name: "SHRW", argLength: 2, reg: gp21shift, asm: "SHRW", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is 
mod 32 + {name: "SHRB", argLength: 2, reg: gp21shift, asm: "SHRB", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32 + {name: "SHRQconst", argLength: 1, reg: gp11, asm: "SHRQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-63 + {name: "SHRLconst", argLength: 1, reg: gp11, asm: "SHRL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-31 + {name: "SHRWconst", argLength: 1, reg: gp11, asm: "SHRW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-15 + {name: "SHRBconst", argLength: 1, reg: gp11, asm: "SHRB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-7 + + {name: "SARQ", argLength: 2, reg: gp21shift, asm: "SARQ", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64 + {name: "SARL", argLength: 2, reg: gp21shift, asm: "SARL", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32 + {name: "SARW", argLength: 2, reg: gp21shift, asm: "SARW", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32 + {name: "SARB", argLength: 2, reg: gp21shift, asm: "SARB", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32 + {name: "SARQconst", argLength: 1, reg: gp11, asm: "SARQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63 + {name: "SARLconst", argLength: 1, reg: gp11, asm: "SARL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31 + {name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-15 + {name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-7 + + {name: "ROLQconst", argLength: 1, reg: gp11, asm: "ROLQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-63 + {name: "ROLLconst", argLength: 1, reg: gp11, asm: "ROLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-31 + {name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15 + {name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7 {name: "ADDLmem", argLength: 3, reg: gp21load, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem {name: "ADDQmem", argLength: 3, reg: gp21load, asm: "ADDQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem @@ -374,7 +374,7 @@ func init() { {name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs for float negation. 
{name: "LEAQ", argLength: 1, reg: gp11sb, asm: "LEAQ", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux - {name: "LEAQ1", argLength: 2, reg: gp21sb, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux + {name: "LEAQ1", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux {name: "LEAQ2", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux {name: "LEAQ4", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux {name: "LEAQ8", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux @@ -398,21 +398,21 @@ func init() { {name: "MOVOstore", argLength: 3, reg: fpstore, asm: "MOVUPS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes in arg1 to arg0+auxint+aux. arg2=mem // indexed loads/stores - {name: "MOVBloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem - {name: "MOVWloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem - {name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", symEffect: "Read"}, // load 2 bytes from arg0+2*arg1+auxint+aux. arg2=mem - {name: "MOVLloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVL", aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem - {name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem - {name: "MOVQloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVQ", aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem - {name: "MOVQloadidx8", argLength: 3, reg: gploadidx, asm: "MOVQ", aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load 8 bytes from arg0+8*arg1+auxint+aux. arg2=mem + {name: "MOVBloadidx1", argLength: 3, reg: gploadidx, asm: "MOVBLZX", aux: "SymOff", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVWloadidx1", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+2*arg1+auxint+aux. arg2=mem + {name: "MOVLloadidx1", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem + {name: "MOVQloadidx1", argLength: 3, reg: gploadidx, asm: "MOVQ", aux: "SymOff", symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVQloadidx8", argLength: 3, reg: gploadidx, asm: "MOVQ", aux: "SymOff", symEffect: "Read"}, // load 8 bytes from arg0+8*arg1+auxint+aux. 
arg2=mem // TODO: sign-extending indexed loads - {name: "MOVBstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVB", aux: "SymOff", symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem - {name: "MOVWstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem - {name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem - {name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem - {name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem - {name: "MOVQstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVQ", aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem - {name: "MOVQstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVQ", aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem + {name: "MOVBstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVB", aux: "SymOff", symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVWstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem + {name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem + {name: "MOVQstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVQ", aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVQstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVQ", aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem // TODO: add size-mismatched indexed loads, like MOVBstoreidx4. 
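(Illustrative aside, not part of the patch: the `commutative: true` flags being dropped in the op tables above are what the reverted rulegen change keyed on to try both argument orders of a match automatically; with the revert, the .rules files below spell out the swapped form of each pattern by hand. The sketch is a hypothetical stand-alone Go program showing the "try both orders" idea only, not the code rulegen actually emits.)

// Sketch only: not generated compiler code. It shows the effect the
// commutative flag had under the reverted rulegen change: a pattern
// written for (op x const) also matched (op const x).
package main

import "fmt"

// matchEitherOrder reports whether pred holds for its two arguments in
// either order, the way a commutative pattern match would be expanded.
func matchEitherOrder(a, b int64, pred func(x, y int64) bool) bool {
	return pred(a, b) || pred(b, a)
}

func main() {
	// Pretend "second argument is a small constant" is the pattern's condition.
	secondIsConst := func(x, y int64) bool { return y == 42 }
	fmt.Println(matchEitherOrder(7, 42, secondIsConst)) // matches as written
	fmt.Println(matchEitherOrder(42, 7, secondIsConst)) // matches with args swapped
}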
// For storeconst ops, the AuxInt field encodes both diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules index 0ecb6d09c0..5ab695c76b 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM.rules @@ -481,13 +481,16 @@ (MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) -> x // fold constant into arithmatic ops +(ADD (MOVWconst [c]) x) -> (ADDconst [c] x) (ADD x (MOVWconst [c])) -> (ADDconst [c] x) (SUB (MOVWconst [c]) x) -> (RSBconst [c] x) (SUB x (MOVWconst [c])) -> (SUBconst [c] x) (RSB (MOVWconst [c]) x) -> (SUBconst [c] x) (RSB x (MOVWconst [c])) -> (RSBconst [c] x) +(ADDS (MOVWconst [c]) x) -> (ADDSconst [c] x) (ADDS x (MOVWconst [c])) -> (ADDSconst [c] x) +(SUBS (MOVWconst [c]) x) -> (RSBSconst [c] x) (SUBS x (MOVWconst [c])) -> (SUBSconst [c] x) (ADC (MOVWconst [c]) x flags) -> (ADCconst [c] x flags) @@ -495,8 +498,11 @@ (SBC (MOVWconst [c]) x flags) -> (RSCconst [c] x flags) (SBC x (MOVWconst [c]) flags) -> (SBCconst [c] x flags) +(AND (MOVWconst [c]) x) -> (ANDconst [c] x) (AND x (MOVWconst [c])) -> (ANDconst [c] x) -(OR x (MOVWconst [c])) -> (ORconst [c] x) +(OR (MOVWconst [c]) x) -> (ORconst [c] x) +(OR x (MOVWconst [c])) -> (ORconst [c] x) +(XOR (MOVWconst [c]) x) -> (XORconst [c] x) (XOR x (MOVWconst [c])) -> (XORconst [c] x) (BIC x (MOVWconst [c])) -> (BICconst [c] x) @@ -556,6 +562,17 @@ (MUL x (MOVWconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (RSBshiftLL x x [3])) (MUL x (MOVWconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) +(MUL (MOVWconst [c]) x) && int32(c) == -1 -> (RSBconst [0] x) +(MUL (MOVWconst [0]) _) -> (MOVWconst [0]) +(MUL (MOVWconst [1]) x) -> x +(MUL (MOVWconst [c]) x) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x) +(MUL (MOVWconst [c]) x) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADDshiftLL x x [log2(c-1)]) +(MUL (MOVWconst [c]) x) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (RSBshiftLL x x [log2(c+1)]) +(MUL (MOVWconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) +(MUL (MOVWconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) +(MUL (MOVWconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (RSBshiftLL x x [3])) +(MUL (MOVWconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) + (MULA x (MOVWconst [c]) a) && int32(c) == -1 -> (SUB a x) (MULA _ (MOVWconst [0]) a) -> a (MULA x (MOVWconst [1]) a) -> (ADD x a) @@ -818,11 +835,17 @@ // absorb shifts into ops (ADD x (SLLconst [c] y)) -> (ADDshiftLL x y [c]) +(ADD (SLLconst [c] y) x) -> (ADDshiftLL x y [c]) (ADD x (SRLconst [c] y)) -> (ADDshiftRL x y [c]) +(ADD (SRLconst [c] y) x) -> (ADDshiftRL x y [c]) (ADD x (SRAconst [c] y)) -> (ADDshiftRA x y [c]) +(ADD (SRAconst [c] y) x) -> (ADDshiftRA x y [c]) (ADD x (SLL y z)) -> (ADDshiftLLreg x y z) +(ADD (SLL y z) x) -> (ADDshiftLLreg x y z) (ADD x (SRL y z)) -> (ADDshiftRLreg x y z) +(ADD (SRL y z) x) -> (ADDshiftRLreg x y z) (ADD x (SRA y z)) -> (ADDshiftRAreg x y z) +(ADD (SRA y z) x) -> (ADDshiftRAreg x y z) (ADC x (SLLconst [c] y) flags) -> (ADCshiftLL x y [c] flags) (ADC (SLLconst [c] y) x flags) -> (ADCshiftLL x y [c] flags) (ADC x (SRLconst [c] y) flags) -> (ADCshiftRL x y [c] flags) @@ -836,11 +859,17 @@ (ADC x (SRA y z) flags) -> 
(ADCshiftRAreg x y z flags) (ADC (SRA y z) x flags) -> (ADCshiftRAreg x y z flags) (ADDS x (SLLconst [c] y)) -> (ADDSshiftLL x y [c]) +(ADDS (SLLconst [c] y) x) -> (ADDSshiftLL x y [c]) (ADDS x (SRLconst [c] y)) -> (ADDSshiftRL x y [c]) +(ADDS (SRLconst [c] y) x) -> (ADDSshiftRL x y [c]) (ADDS x (SRAconst [c] y)) -> (ADDSshiftRA x y [c]) +(ADDS (SRAconst [c] y) x) -> (ADDSshiftRA x y [c]) (ADDS x (SLL y z)) -> (ADDSshiftLLreg x y z) +(ADDS (SLL y z) x) -> (ADDSshiftLLreg x y z) (ADDS x (SRL y z)) -> (ADDSshiftRLreg x y z) +(ADDS (SRL y z) x) -> (ADDSshiftRLreg x y z) (ADDS x (SRA y z)) -> (ADDSshiftRAreg x y z) +(ADDS (SRA y z) x) -> (ADDSshiftRAreg x y z) (SUB x (SLLconst [c] y)) -> (SUBshiftLL x y [c]) (SUB (SLLconst [c] y) x) -> (RSBshiftLL x y [c]) (SUB x (SRLconst [c] y)) -> (SUBshiftRL x y [c]) @@ -890,24 +919,43 @@ (RSB x (SRA y z)) -> (RSBshiftRAreg x y z) (RSB (SRA y z) x) -> (SUBshiftRAreg x y z) (AND x (SLLconst [c] y)) -> (ANDshiftLL x y [c]) +(AND (SLLconst [c] y) x) -> (ANDshiftLL x y [c]) (AND x (SRLconst [c] y)) -> (ANDshiftRL x y [c]) +(AND (SRLconst [c] y) x) -> (ANDshiftRL x y [c]) (AND x (SRAconst [c] y)) -> (ANDshiftRA x y [c]) +(AND (SRAconst [c] y) x) -> (ANDshiftRA x y [c]) (AND x (SLL y z)) -> (ANDshiftLLreg x y z) +(AND (SLL y z) x) -> (ANDshiftLLreg x y z) (AND x (SRL y z)) -> (ANDshiftRLreg x y z) +(AND (SRL y z) x) -> (ANDshiftRLreg x y z) (AND x (SRA y z)) -> (ANDshiftRAreg x y z) +(AND (SRA y z) x) -> (ANDshiftRAreg x y z) (OR x (SLLconst [c] y)) -> (ORshiftLL x y [c]) +(OR (SLLconst [c] y) x) -> (ORshiftLL x y [c]) (OR x (SRLconst [c] y)) -> (ORshiftRL x y [c]) +(OR (SRLconst [c] y) x) -> (ORshiftRL x y [c]) (OR x (SRAconst [c] y)) -> (ORshiftRA x y [c]) +(OR (SRAconst [c] y) x) -> (ORshiftRA x y [c]) (OR x (SLL y z)) -> (ORshiftLLreg x y z) +(OR (SLL y z) x) -> (ORshiftLLreg x y z) (OR x (SRL y z)) -> (ORshiftRLreg x y z) +(OR (SRL y z) x) -> (ORshiftRLreg x y z) (OR x (SRA y z)) -> (ORshiftRAreg x y z) +(OR (SRA y z) x) -> (ORshiftRAreg x y z) (XOR x (SLLconst [c] y)) -> (XORshiftLL x y [c]) +(XOR (SLLconst [c] y) x) -> (XORshiftLL x y [c]) (XOR x (SRLconst [c] y)) -> (XORshiftRL x y [c]) +(XOR (SRLconst [c] y) x) -> (XORshiftRL x y [c]) (XOR x (SRAconst [c] y)) -> (XORshiftRA x y [c]) +(XOR (SRAconst [c] y) x) -> (XORshiftRA x y [c]) (XOR x (SRRconst [c] y)) -> (XORshiftRR x y [c]) +(XOR (SRRconst [c] y) x) -> (XORshiftRR x y [c]) (XOR x (SLL y z)) -> (XORshiftLLreg x y z) +(XOR (SLL y z) x) -> (XORshiftLLreg x y z) (XOR x (SRL y z)) -> (XORshiftRLreg x y z) +(XOR (SRL y z) x) -> (XORshiftRLreg x y z) (XOR x (SRA y z)) -> (XORshiftRAreg x y z) +(XOR (SRA y z) x) -> (XORshiftRAreg x y z) (BIC x (SLLconst [c] y)) -> (BICshiftLL x y [c]) (BIC x (SRLconst [c] y)) -> (BICshiftRL x y [c]) (BIC x (SRAconst [c] y)) -> (BICshiftRA x y [c]) @@ -1159,6 +1207,7 @@ // generic simplifications (ADD x (RSBconst [0] y)) -> (SUB x y) +(ADD (RSBconst [0] y) x) -> (SUB x y) (ADD (RSBconst [c] x) (RSBconst [d] y)) -> (RSBconst [c+d] (ADD x y)) (SUB x x) -> (MOVWconst [0]) (RSB x x) -> (MOVWconst [0]) @@ -1168,8 +1217,10 @@ (BIC x x) -> (MOVWconst [0]) (ADD (MUL x y) a) -> (MULA x y a) +(ADD a (MUL x y)) -> (MULA x y a) (AND x (MVN y)) -> (BIC x y) +(AND (MVN y) x) -> (BIC x y) // simplification with *shift ops (SUBshiftLL x (SLLconst x [c]) [d]) && c==d -> (MOVWconst [0]) @@ -1191,8 +1242,11 @@ (BICshiftRL x (SRLconst x [c]) [d]) && c==d -> (MOVWconst [0]) (BICshiftRA x (SRAconst x [c]) [d]) && c==d -> (MOVWconst [0]) (AND x (MVNshiftLL y [c])) -> (BICshiftLL x y [c]) +(AND 
(MVNshiftLL y [c]) x) -> (BICshiftLL x y [c]) (AND x (MVNshiftRL y [c])) -> (BICshiftRL x y [c]) +(AND (MVNshiftRL y [c]) x) -> (BICshiftRL x y [c]) (AND x (MVNshiftRA y [c])) -> (BICshiftRA x y [c]) +(AND (MVNshiftRA y [c]) x) -> (BICshiftRA x y [c]) // floating point optimizations (CMPF x (MOVFconst [0])) -> (CMPF0 x) diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules index 6adbaf5ba2..41661082c7 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM64.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules @@ -752,10 +752,14 @@ (MOVDreg x) && x.Uses == 1 -> (MOVDnop x) // fold constant into arithmatic ops +(ADD (MOVDconst [c]) x) -> (ADDconst [c] x) (ADD x (MOVDconst [c])) -> (ADDconst [c] x) (SUB x (MOVDconst [c])) -> (SUBconst [c] x) +(AND (MOVDconst [c]) x) -> (ANDconst [c] x) (AND x (MOVDconst [c])) -> (ANDconst [c] x) +(OR (MOVDconst [c]) x) -> (ORconst [c] x) (OR x (MOVDconst [c])) -> (ORconst [c] x) +(XOR (MOVDconst [c]) x) -> (XORconst [c] x) (XOR x (MOVDconst [c])) -> (XORconst [c] x) (BIC x (MOVDconst [c])) -> (BICconst [c] x) @@ -780,6 +784,18 @@ (MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) -> (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) (MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) -> (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) +(MUL (MOVDconst [-1]) x) -> (NEG x) +(MUL (MOVDconst [0]) _) -> (MOVDconst [0]) +(MUL (MOVDconst [1]) x) -> x +(MUL (MOVDconst [c]) x) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x) +(MUL (MOVDconst [c]) x) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x) +(MUL (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c >= 3 -> (ADDshiftLL x x [log2(c-1)]) +(MUL (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c >= 7 -> (ADDshiftLL (NEG x) x [log2(c+1)]) +(MUL (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) +(MUL (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) +(MUL (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) -> (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) +(MUL (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) + (MULW x (MOVDconst [c])) && int32(c)==-1 -> (NEG x) (MULW _ (MOVDconst [c])) && int32(c)==0 -> (MOVDconst [0]) (MULW x (MOVDconst [c])) && int32(c)==1 -> x @@ -791,6 +807,17 @@ (MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) (MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) +(MULW (MOVDconst [c]) x) && int32(c)==-1 -> (NEG x) +(MULW (MOVDconst [c]) _) && int32(c)==0 -> (MOVDconst [0]) +(MULW (MOVDconst [c]) x) && int32(c)==1 -> x +(MULW (MOVDconst [c]) x) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x) +(MULW (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADDshiftLL x x [log2(c-1)]) +(MULW (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (ADDshiftLL (NEG x) x [log2(c+1)]) +(MULW (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) +(MULW (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) +(MULW (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) +(MULW (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) + // div by 
constant (UDIV x (MOVDconst [1])) -> x (UDIV x (MOVDconst [c])) && isPowerOfTwo(c) -> (SRLconst [log2(c)] x) @@ -803,6 +830,7 @@ // generic simplifications (ADD x (NEG y)) -> (SUB x y) +(ADD (NEG y) x) -> (SUB x y) (SUB x x) -> (MOVDconst [0]) (AND x x) -> x (OR x x) -> x @@ -1052,20 +1080,34 @@ // absorb shifts into ops (ADD x (SLLconst [c] y)) -> (ADDshiftLL x y [c]) +(ADD (SLLconst [c] y) x) -> (ADDshiftLL x y [c]) (ADD x (SRLconst [c] y)) -> (ADDshiftRL x y [c]) +(ADD (SRLconst [c] y) x) -> (ADDshiftRL x y [c]) (ADD x (SRAconst [c] y)) -> (ADDshiftRA x y [c]) +(ADD (SRAconst [c] y) x) -> (ADDshiftRA x y [c]) (SUB x (SLLconst [c] y)) -> (SUBshiftLL x y [c]) (SUB x (SRLconst [c] y)) -> (SUBshiftRL x y [c]) (SUB x (SRAconst [c] y)) -> (SUBshiftRA x y [c]) (AND x (SLLconst [c] y)) -> (ANDshiftLL x y [c]) +(AND (SLLconst [c] y) x) -> (ANDshiftLL x y [c]) (AND x (SRLconst [c] y)) -> (ANDshiftRL x y [c]) +(AND (SRLconst [c] y) x) -> (ANDshiftRL x y [c]) (AND x (SRAconst [c] y)) -> (ANDshiftRA x y [c]) -(OR x (SLLconst [c] y)) -> (ORshiftLL x y [c]) // useful for combined load +(AND (SRAconst [c] y) x) -> (ANDshiftRA x y [c]) +(OR x s:(SLLconst [c] y)) && s.Uses == 1 && clobber(s) -> (ORshiftLL x y [c]) // useful for combined load +(OR s:(SLLconst [c] y) x) && s.Uses == 1 && clobber(s) -> (ORshiftLL x y [c]) +(OR x (SLLconst [c] y)) -> (ORshiftLL x y [c]) +(OR (SLLconst [c] y) x) -> (ORshiftLL x y [c]) (OR x (SRLconst [c] y)) -> (ORshiftRL x y [c]) +(OR (SRLconst [c] y) x) -> (ORshiftRL x y [c]) (OR x (SRAconst [c] y)) -> (ORshiftRA x y [c]) +(OR (SRAconst [c] y) x) -> (ORshiftRA x y [c]) (XOR x (SLLconst [c] y)) -> (XORshiftLL x y [c]) +(XOR (SLLconst [c] y) x) -> (XORshiftLL x y [c]) (XOR x (SRLconst [c] y)) -> (XORshiftRL x y [c]) +(XOR (SRLconst [c] y) x) -> (XORshiftRL x y [c]) (XOR x (SRAconst [c] y)) -> (XORshiftRA x y [c]) +(XOR (SRAconst [c] y) x) -> (XORshiftRA x y [c]) (BIC x (SLLconst [c] y)) -> (BICshiftLL x y [c]) (BIC x (SRLconst [c] y)) -> (BICshiftRL x y [c]) (BIC x (SRAconst [c] y)) -> (BICshiftRA x y [c]) @@ -1152,23 +1194,20 @@ // little endian loads // b[0] | b[1]<<8 -> load 16-bit (ORshiftLL [8] - y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)) - y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) - && i1 == i0+1 + y0:(MOVDnop x0:(MOVBUload [i] {s} p mem)) + y1:(MOVDnop x1:(MOVBUload [i+1] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) - -> @mergePoint(b,x0,x1) (MOVHUload {s} (OffPtr [i0] p) mem) + -> @mergePoint(b,x0,x1) (MOVHUload {s} (OffPtr [i] p) mem) // b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit (ORshiftLL [24] o0:(ORshiftLL [16] - x0:(MOVHUload [i0] {s} p mem) - y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) - y2:(MOVDnop x2:(MOVBUload [i3] {s} p mem))) - && i2 == i0+2 - && i3 == i0+3 + x0:(MOVHUload [i] {s} p mem) + y1:(MOVDnop x1:(MOVBUload [i+2] {s} p mem))) + y2:(MOVDnop x2:(MOVBUload [i+3] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 @@ -1176,19 +1215,15 @@ && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0) - -> @mergePoint(b,x0,x1,x2) (MOVWUload {s} (OffPtr [i0] p) mem) + -> @mergePoint(b,x0,x1,x2) (MOVWUload {s} (OffPtr [i] p) mem) // b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] - x0:(MOVWUload [i0] {s} p mem) - y1:(MOVDnop 
x1:(MOVBUload [i4] {s} p mem))) - y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) - y3:(MOVDnop x3:(MOVBUload [i6] {s} p mem))) - y4:(MOVDnop x4:(MOVBUload [i7] {s} p mem))) - && i4 == i0+4 - && i5 == i0+5 - && i6 == i0+6 - && i7 == i0+7 + x0:(MOVWUload [i] {s} p mem) + y1:(MOVDnop x1:(MOVBUload [i+4] {s} p mem))) + y2:(MOVDnop x2:(MOVBUload [i+5] {s} p mem))) + y3:(MOVDnop x3:(MOVBUload [i+6] {s} p mem))) + y4:(MOVDnop x4:(MOVBUload [i+7] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 @@ -1196,17 +1231,14 @@ && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) - -> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload {s} (OffPtr [i0] p) mem) + -> @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload {s} (OffPtr [i] p) mem) // b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] -> load 32-bit (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] - y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem))) - y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) - y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem))) - y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem))) - && i1 == i0+1 - && i2 == i0+2 - && i3 == i0+3 + y0:(MOVDnop x0:(MOVBUload [i] {s} p mem))) + y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem))) + y2:(MOVDnop x2:(MOVBUload [i-2] {s} p mem))) + y3:(MOVDnop x3:(MOVBUload [i-3] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 @@ -1214,25 +1246,18 @@ && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - -> @mergePoint(b,x0,x1,x2,x3) (MOVWUload {s} (OffPtr [i0] p) mem) + -> @mergePoint(b,x0,x1,x2,x3) (MOVWUload {s} (OffPtr [i-3] p) mem) // b[7]<<56 | b[6]<<48 | b[5]<<40 | b[4]<<32 | b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] -> load 64-bit, reverse (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] - y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem))) - y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem))) - y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) - y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem))) - y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem))) - y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem))) - y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem))) - y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem))) - && i1 == i0+1 - && i2 == i0+2 - && i3 == i0+3 - && i4 == i0+4 - && i5 == i0+5 - && i6 == i0+6 - && i7 == i0+7 + y0:(MOVDnop x0:(MOVBUload [i] {s} p mem))) + y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem))) + y2:(MOVDnop x2:(MOVBUload [i-2] {s} p mem))) + y3:(MOVDnop x3:(MOVBUload [i-3] {s} p mem))) + y4:(MOVDnop x4:(MOVBUload [i-4] {s} p mem))) + y5:(MOVDnop x5:(MOVBUload [i-5] {s} p mem))) + y6:(MOVDnop x6:(MOVBUload [i-6] {s} p mem))) + y7:(MOVDnop x7:(MOVBUload [i-7] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 @@ -1246,29 +1271,26 @@ && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV 
(MOVDload {s} (OffPtr [i0] p) mem)) + -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDload {s} (OffPtr [i-7] p) mem)) // big endian loads // b[1] | b[0]<<8 -> load 16-bit, reverse (ORshiftLL [8] - y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem)) - y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem))) - && i1 == i0+1 - && (i0%2 == 0 || i0<256 && i0>-256 && !isArg(s) && !isAuto(s)) + y0:(MOVDnop x0:(MOVBUload [i] {s} p mem)) + y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem))) + && ((i-1)%2 == 0 || i-1<256 && i-1>-256 && !isArg(s) && !isAuto(s)) && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) - -> @mergePoint(b,x0,x1) (REV16W (MOVHUload [i0] {s} p mem)) + -> @mergePoint(b,x0,x1) (REV16W (MOVHUload [i-1] {s} p mem)) // b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit, reverse (ORshiftLL [24] o0:(ORshiftLL [16] - y0:(REV16W x0:(MOVHUload [i2] {s} p mem)) - y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) - y2:(MOVDnop x2:(MOVBUload [i0] {s} p mem))) - && i1 == i0+1 - && i2 == i0+2 + y0:(REV16W x0:(MOVHUload [i] {s} p mem)) + y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem))) + y2:(MOVDnop x2:(MOVBUload [i-2] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 @@ -1276,19 +1298,15 @@ && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0) - -> @mergePoint(b,x0,x1,x2) (REVW (MOVWUload {s} (OffPtr [i0] p) mem)) + -> @mergePoint(b,x0,x1,x2) (REVW (MOVWUload {s} (OffPtr [i-2] p) mem)) // b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit, reverse (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] - y0:(REVW x0:(MOVWUload [i4] {s} p mem)) - y1:(MOVDnop x1:(MOVBUload [i3] {s} p mem))) - y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) - y3:(MOVDnop x3:(MOVBUload [i1] {s} p mem))) - y4:(MOVDnop x4:(MOVBUload [i0] {s} p mem))) - && i1 == i0+1 - && i2 == i0+2 - && i3 == i0+3 - && i4 == i0+4 + y0:(REVW x0:(MOVWUload [i] {s} p mem)) + y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem))) + y2:(MOVDnop x2:(MOVBUload [i-2] {s} p mem))) + y3:(MOVDnop x3:(MOVBUload [i-3] {s} p mem))) + y4:(MOVDnop x4:(MOVBUload [i-4] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 @@ -1296,17 +1314,14 @@ && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) - -> @mergePoint(b,x0,x1,x2,x3,x4) (REV (MOVDload {s} (OffPtr [i0] p) mem)) + -> @mergePoint(b,x0,x1,x2,x3,x4) (REV (MOVDload {s} (OffPtr [i-4] p) mem)) // b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] -> load 32-bit, reverse (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] - y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) - y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) - y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) - y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem))) - && i1 == i0+1 - && i2 == i0+2 - && i3 == i0+3 + y0:(MOVDnop x0:(MOVBUload [i] {s} p mem))) + y1:(MOVDnop x1:(MOVBUload [i+1] {s} p mem))) + y2:(MOVDnop x2:(MOVBUload [i+2] {s} p mem))) + y3:(MOVDnop x3:(MOVBUload [i+3] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && 
o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 @@ -1314,25 +1329,18 @@ && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - -> @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUload {s} (OffPtr [i0] p) mem)) + -> @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUload {s} (OffPtr [i] p) mem)) // b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] -> load 64-bit, reverse (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] - y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) - y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) - y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) - y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem))) - y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem))) - y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem))) - y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem))) - y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem))) - && i1 == i0+1 - && i2 == i0+2 - && i3 == i0+3 - && i4 == i0+4 - && i5 == i0+5 - && i6 == i0+6 - && i7 == i0+7 + y0:(MOVDnop x0:(MOVBUload [i] {s} p mem))) + y1:(MOVDnop x1:(MOVBUload [i+1] {s} p mem))) + y2:(MOVDnop x2:(MOVBUload [i+2] {s} p mem))) + y3:(MOVDnop x3:(MOVBUload [i+3] {s} p mem))) + y4:(MOVDnop x4:(MOVBUload [i+4] {s} p mem))) + y5:(MOVDnop x5:(MOVBUload [i+5] {s} p mem))) + y6:(MOVDnop x6:(MOVBUload [i+6] {s} p mem))) + y7:(MOVDnop x7:(MOVBUload [i+7] {s} p mem))) && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 @@ -1346,4 +1354,4 @@ && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDload {s} (OffPtr [i0] p) mem)) + -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDload {s} (OffPtr [i] p) mem)) diff --git a/src/cmd/compile/internal/ssa/gen/MIPS.rules b/src/cmd/compile/internal/ssa/gen/MIPS.rules index c4130aa57a..3f40951d3b 100644 --- a/src/cmd/compile/internal/ssa/gen/MIPS.rules +++ b/src/cmd/compile/internal/ssa/gen/MIPS.rules @@ -591,11 +591,16 @@ (MOVWreg x) && x.Uses == 1 -> (MOVWnop x) // fold constant into arithmatic ops +(ADD (MOVWconst [c]) x) -> (ADDconst [c] x) (ADD x (MOVWconst [c])) -> (ADDconst [c] x) (SUB x (MOVWconst [c])) -> (SUBconst [c] x) +(AND (MOVWconst [c]) x) -> (ANDconst [c] x) (AND x (MOVWconst [c])) -> (ANDconst [c] x) +(OR (MOVWconst [c]) x) -> (ORconst [c] x) (OR x (MOVWconst [c])) -> (ORconst [c] x) +(XOR (MOVWconst [c]) x) -> (XORconst [c] x) (XOR x (MOVWconst [c])) -> (XORconst [c] x) +(NOR (MOVWconst [c]) x) -> (NORconst [c] x) (NOR x (MOVWconst [c])) -> (NORconst [c] x) (SLL _ (MOVWconst [c])) && uint32(c)>=32 -> (MOVWconst [0]) @@ -630,6 +635,7 @@ // generic simplifications (ADD x (NEG y)) -> (SUB x y) +(ADD (NEG y) x) -> (SUB x y) (SUB x x) -> (MOVWconst [0]) (SUB (MOVWconst [0]) x) -> (NEG x) (AND x x) -> x @@ -723,12 +729,12 @@ // conditional move (CMOVZ _ b (MOVWconst [0])) -> b -(CMOVZ a _ (MOVWconst [c])) && c!=0 -> a +(CMOVZ a _ (MOVWconst [c])) && c!=0-> a (CMOVZzero _ (MOVWconst [0])) -> (MOVWconst [0]) -(CMOVZzero a (MOVWconst [c])) && c!=0 -> a +(CMOVZzero a (MOVWconst [c])) && c!=0-> a (CMOVZ a (MOVWconst [0]) c) -> (CMOVZzero a c) // atomic (LoweredAtomicStore ptr (MOVWconst [0]) mem) -> (LoweredAtomicStorezero ptr mem) -(LoweredAtomicAdd ptr 
(MOVWconst [c]) mem) && is16Bit(c) -> (LoweredAtomicAddconst [c] ptr mem) +(LoweredAtomicAdd ptr (MOVWconst [c]) mem) && is16Bit(c)-> (LoweredAtomicAddconst [c] ptr mem) diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64.rules b/src/cmd/compile/internal/ssa/gen/MIPS64.rules index 6dd5461f1f..42b0dc51bb 100644 --- a/src/cmd/compile/internal/ssa/gen/MIPS64.rules +++ b/src/cmd/compile/internal/ssa/gen/MIPS64.rules @@ -579,11 +579,16 @@ (MOVVreg x) && x.Uses == 1 -> (MOVVnop x) // fold constant into arithmatic ops +(ADDV (MOVVconst [c]) x) && is32Bit(c) -> (ADDVconst [c] x) (ADDV x (MOVVconst [c])) && is32Bit(c) -> (ADDVconst [c] x) (SUBV x (MOVVconst [c])) && is32Bit(c) -> (SUBVconst [c] x) +(AND (MOVVconst [c]) x) && is32Bit(c) -> (ANDconst [c] x) (AND x (MOVVconst [c])) && is32Bit(c) -> (ANDconst [c] x) +(OR (MOVVconst [c]) x) && is32Bit(c) -> (ORconst [c] x) (OR x (MOVVconst [c])) && is32Bit(c) -> (ORconst [c] x) +(XOR (MOVVconst [c]) x) && is32Bit(c) -> (XORconst [c] x) (XOR x (MOVVconst [c])) && is32Bit(c) -> (XORconst [c] x) +(NOR (MOVVconst [c]) x) && is32Bit(c) -> (NORconst [c] x) (NOR x (MOVVconst [c])) && is32Bit(c) -> (NORconst [c] x) (SLLV _ (MOVVconst [c])) && uint64(c)>=64 -> (MOVVconst [0]) @@ -615,6 +620,7 @@ // generic simplifications (ADDV x (NEGV y)) -> (SUBV x y) +(ADDV (NEGV y) x) -> (SUBV x y) (SUBV x x) -> (MOVVconst [0]) (SUBV (MOVVconst [0]) x) -> (NEGV x) (AND x x) -> x diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules index 3601820993..a44e50629d 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules @@ -577,7 +577,7 @@ (Move [8] {t} dst src mem) && t.(Type).Alignment()%4 == 0 -> (MOVWstore [4] dst (MOVWZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem)) -(Move [8] {t} dst src mem) && t.(Type).Alignment()%2 == 0 -> +(Move [8] {t} dst src mem) && t.(Type).Alignment()%2 == 0-> (MOVHstore [6] dst (MOVHZload [6] src mem) (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVHstore [2] dst (MOVHZload [2] src mem) @@ -624,6 +624,9 @@ (AND x (MOVDconst [c])) && isU16Bit(c) -> (ANDconst [c] x) (XOR x (MOVDconst [c])) && isU32Bit(c) -> (XORconst [c] x) (OR x (MOVDconst [c])) && isU32Bit(c) -> (ORconst [c] x) +(AND (MOVDconst [c]) x) && isU16Bit(c) -> (ANDconst [c] x) +(XOR (MOVDconst [c]) x) && isU32Bit(c) -> (XORconst [c] x) +(OR (MOVDconst [c]) x) && isU32Bit(c) -> (ORconst [c] x) // Simplify consts (ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x) @@ -689,6 +692,7 @@ // Arithmetic constant ops +(ADD (MOVDconst [c]) x) && is32Bit(c) -> (ADDconst [c] x) (ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x) (ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) -> (ADDconst [c+d] x) (ADDconst [0] x) -> x @@ -860,7 +864,9 @@ (AND x:(MOVBZload _ _) (MOVDconst [c])) -> (ANDconst [c&0xFF] x) // floating-point fused multiply-add/sub +(FADD z (FMUL x y)) -> (FMADD x y z) (FADD (FMUL x y) z) -> (FMADD x y z) (FSUB (FMUL x y) z) -> (FMSUB x y z) +(FADDS z (FMULS x y)) -> (FMADDS x y z) (FADDS (FMULS x y) z) -> (FMADDS x y z) (FSUBS (FMULS x y) z) -> (FMSUBS x y z) diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go index 93d2981be3..387584dbda 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go @@ -224,7 +224,7 @@ func init() { {name: "ANDN", argLength: 2, reg: gp21, asm: "ANDN"}, // arg0&^arg1 {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, 
// arg0|arg1 {name: "ORN", argLength: 2, reg: gp21, asm: "ORN"}, // arg0|^arg1 - {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0|arg1) + {name: "NOR", argLength: 2, reg: gp21, asm: "NOR"}, // ^(arg0|arg1) {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", typ: "Int64", commutative: true}, // arg0^arg1 {name: "EQV", argLength: 2, reg: gp21, asm: "EQV", typ: "Int64", commutative: true}, // arg0^^arg1 {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0 (integer) diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules index 3c62656ba6..ef96cc0d82 100644 --- a/src/cmd/compile/internal/ssa/gen/S390X.rules +++ b/src/cmd/compile/internal/ssa/gen/S390X.rules @@ -514,7 +514,9 @@ // Fold constants into instructions. (ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x) +(ADD (MOVDconst [c]) x) && is32Bit(c) -> (ADDconst [c] x) (ADDW x (MOVDconst [c])) -> (ADDWconst [c] x) +(ADDW (MOVDconst [c]) x) -> (ADDWconst [c] x) (SUB x (MOVDconst [c])) && is32Bit(c) -> (SUBconst x [c]) (SUB (MOVDconst [c]) x) && is32Bit(c) -> (NEG (SUBconst x [c])) @@ -522,23 +524,31 @@ (SUBW (MOVDconst [c]) x) -> (NEGW (SUBWconst x [c])) (MULLD x (MOVDconst [c])) && is32Bit(c) -> (MULLDconst [c] x) +(MULLD (MOVDconst [c]) x) && is32Bit(c) -> (MULLDconst [c] x) (MULLW x (MOVDconst [c])) -> (MULLWconst [c] x) +(MULLW (MOVDconst [c]) x) -> (MULLWconst [c] x) // NILF instructions leave the high 32 bits unchanged which is // equivalent to the leftmost 32 bits being set. // TODO(mundaym): modify the assembler to accept 64-bit values // and use isU32Bit(^c). (AND x (MOVDconst [c])) && is32Bit(c) && c < 0 -> (ANDconst [c] x) +(AND (MOVDconst [c]) x) && is32Bit(c) && c < 0 -> (ANDconst [c] x) (ANDW x (MOVDconst [c])) -> (ANDWconst [c] x) +(ANDW (MOVDconst [c]) x) -> (ANDWconst [c] x) (ANDWconst [c] (ANDWconst [d] x)) -> (ANDWconst [c & d] x) (ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c & d] x) (OR x (MOVDconst [c])) && isU32Bit(c) -> (ORconst [c] x) +(OR (MOVDconst [c]) x) && isU32Bit(c) -> (ORconst [c] x) (ORW x (MOVDconst [c])) -> (ORWconst [c] x) +(ORW (MOVDconst [c]) x) -> (ORWconst [c] x) (XOR x (MOVDconst [c])) && isU32Bit(c) -> (XORconst [c] x) +(XOR (MOVDconst [c]) x) && isU32Bit(c) -> (XORconst [c] x) (XORW x (MOVDconst [c])) -> (XORWconst [c] x) +(XORW (MOVDconst [c]) x) -> (XORWconst [c] x) (SLD x (MOVDconst [c])) -> (SLDconst [c&63] x) (SLW x (MOVDconst [c])) -> (SLWconst [c&63] x) @@ -555,13 +565,19 @@ (SRD x (ANDconst [63] y)) -> (SRD x y) // Rotate generation -(ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (RLLGconst [c] x) -( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (RLLGconst [c] x) -(XOR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (RLLGconst [c] x) - -(ADDW (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (RLLconst [c] x) -( ORW (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (RLLconst [c] x) -(XORW (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (RLLconst [c] x) +(ADD (SLDconst x [c]) (SRDconst x [64-c])) -> (RLLGconst [ c] x) +( OR (SLDconst x [c]) (SRDconst x [64-c])) -> (RLLGconst [ c] x) +(XOR (SLDconst x [c]) (SRDconst x [64-c])) -> (RLLGconst [ c] x) +(ADD (SRDconst x [c]) (SLDconst x [64-c])) -> (RLLGconst [64-c] x) +( OR (SRDconst x [c]) (SLDconst x [64-c])) -> (RLLGconst [64-c] x) +(XOR (SRDconst x [c]) (SLDconst x [64-c])) -> (RLLGconst [64-c] x) + +(ADDW (SLWconst x [c]) (SRWconst x [32-c])) -> (RLLconst [ c] x) +( ORW (SLWconst x [c]) (SRWconst x [32-c])) -> (RLLconst [ c] 
x) +(XORW (SLWconst x [c]) (SRWconst x [32-c])) -> (RLLconst [ c] x) +(ADDW (SRWconst x [c]) (SLWconst x [32-c])) -> (RLLconst [32-c] x) +( ORW (SRWconst x [c]) (SLWconst x [32-c])) -> (RLLconst [32-c] x) +(XORW (SRWconst x [c]) (SLWconst x [32-c])) -> (RLLconst [32-c] x) (CMP x (MOVDconst [c])) && is32Bit(c) -> (CMPconst x [c]) (CMP (MOVDconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPconst x [c])) @@ -573,8 +589,11 @@ (CMPWU (MOVDconst [c]) x) -> (InvertFlags (CMPWUconst x [int64(uint32(c))])) // Using MOV{W,H,B}Zreg instead of AND is cheaper. +(AND (MOVDconst [0xFF]) x) -> (MOVBZreg x) (AND x (MOVDconst [0xFF])) -> (MOVBZreg x) +(AND (MOVDconst [0xFFFF]) x) -> (MOVHZreg x) (AND x (MOVDconst [0xFFFF])) -> (MOVHZreg x) +(AND (MOVDconst [0xFFFFFFFF]) x) -> (MOVWZreg x) (AND x (MOVDconst [0xFFFFFFFF])) -> (MOVWZreg x) (ANDWconst [0xFF] x) -> (MOVBZreg x) (ANDWconst [0xFFFF] x) -> (MOVHZreg x) @@ -598,6 +617,7 @@ (ADDconst [c] (MOVDaddr [d] {s} x:(SB))) && ((c+d)&1 == 0) && is32Bit(c+d) -> (MOVDaddr [c+d] {s} x) (ADDconst [c] (MOVDaddr [d] {s} x)) && x.Op != OpSB && is20Bit(c+d) -> (MOVDaddr [c+d] {s} x) (ADD x (MOVDaddr [c] {s} y)) && x.Op != OpSB && y.Op != OpSB -> (MOVDaddridx [c] {s} x y) +(ADD (MOVDaddr [c] {s} x) y) && x.Op != OpSB && y.Op != OpSB -> (MOVDaddridx [c] {s} x y) // fold ADDconst into MOVDaddrx (ADDconst [c] (MOVDaddridx [d] {s} x y)) && is20Bit(c+d) -> (MOVDaddridx [c+d] {s} x y) @@ -1007,6 +1027,8 @@ (NEG (ADDconst [c] (NEG x))) && c != -(1<<31) -> (ADDconst [-c] x) // fused multiply-add +(FADD x (FMUL y z)) -> (FMADD x y z) +(FADDS x (FMULS y z)) -> (FMADDS x y z) (FADD (FMUL y z) x) -> (FMADD x y z) (FADDS (FMULS y z) x) -> (FMADDS x y z) (FSUB (FMUL y z) x) -> (FMSUB x y z) @@ -1289,549 +1311,369 @@ && clobber(x) -> (MOVDBRstoreidx [i-4] {s} p idx w0 mem) -// Combining byte loads into larger (unaligned) loads. +// Move shifts to second argument of ORs. Helps load combining rules below. +(ORW x:(SLWconst _) y) && y.Op != OpS390XSLWconst -> (ORW y x) +(OR x:(SLDconst _) y) && y.Op != OpS390XSLDconst -> (OR y x) -// Big-endian loads +// Combining byte loads into larger (unaligned) loads. -(ORW x1:(MOVBZload [i1] {s} p mem) - sh:(SLWconst [8] x0:(MOVBZload [i0] {s} p mem))) - && i1 == i0+1 - && p.Op != OpSB - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem) +// Little endian loads. 
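(Illustrative aside, not part of the patch: the little-endian rules in this section look for the byte-assembly shape below and, because s390x is big-endian, collapse it into a single byte-reversed load such as MOVHBRload or MOVWBRload. The program is a hypothetical example of Go source that produces this shape, assuming the bytes come from a plain []byte.)

// Sketch of the source-level pattern the little-endian load rules target.
package main

import "fmt"

// le16 builds a little-endian 16-bit value from two adjacent bytes; the
// (ORW ... (SLWconst [8] ...)) rules fold this into one byte-reversed load.
func le16(b []byte) uint16 {
	return uint16(b[0]) | uint16(b[1])<<8
}

// le32 is the 32-bit version matched by the MOVWBRload rule.
func le32(b []byte) uint32 {
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}

func main() {
	b := []byte{0x78, 0x56, 0x34, 0x12}
	fmt.Printf("%#x %#x\n", le16(b), le32(b)) // 0x5678 0x12345678
}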
-(OR x1:(MOVBZload [i1] {s} p mem) - sh:(SLDconst [8] x0:(MOVBZload [i0] {s} p mem))) - && i1 == i0+1 +// b[0] | b[1]<<8 -> load 16-bit, reverse bytes +(ORW x0:(MOVBZload [i] {s} p mem) + s0:(SLWconst [8] x1:(MOVBZload [i+1] {s} p mem))) && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem) - -(ORW x1:(MOVHZload [i1] {s} p mem) - sh:(SLWconst [16] x0:(MOVHZload [i0] {s} p mem))) - && i1 == i0+2 - && p.Op != OpSB - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem) - -(OR x1:(MOVHZload [i1] {s} p mem) - sh:(SLDconst [16] x0:(MOVHZload [i0] {s} p mem))) - && i1 == i0+2 - && p.Op != OpSB - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 + && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem) + && clobber(s0) + -> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i] {s} p mem)) -(OR x1:(MOVWZload [i1] {s} p mem) - sh:(SLDconst [32] x0:(MOVWZload [i0] {s} p mem))) - && i1 == i0+4 +// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit, reverse bytes +(ORW o0:(ORW z0:(MOVHZreg x0:(MOVHBRload [i] {s} p mem)) + s0:(SLWconst [16] x1:(MOVBZload [i+2] {s} p mem))) + s1:(SLWconst [24] x2:(MOVBZload [i+3] {s} p mem))) && p.Op != OpSB + && z0.Uses == 1 && x0.Uses == 1 && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVDload [i0] {s} p mem) - -(ORW - s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem)) - or:(ORW - s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem)) - y)) - && i1 == i0+1 - && j1 == j0-8 - && j1 % 16 == 0 - && x0.Uses == 1 - && x1.Uses == 1 + && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1) != nil + && o0.Uses == 1 + && mergePoint(b,x0,x1,x2) != nil + && clobber(z0) && clobber(x0) && clobber(x1) + && clobber(x2) && clobber(s0) && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZload [i0] {s} p mem)) y) - -(OR - s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem)) - or:(OR - s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem)) - y)) - && i1 == i0+1 - && j1 == j0-8 - && j1 % 16 == 0 + && clobber(o0) + -> @mergePoint(b,x0,x1,x2) (MOVWBRload [i] {s} p mem) + +// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit, reverse bytes +(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR + x0:(MOVBZload [i] {s} p mem) + s0:(SLDconst [8] x1:(MOVBZload [i+1] {s} p mem))) + s1:(SLDconst [16] x2:(MOVBZload [i+2] {s} p mem))) + s2:(SLDconst [24] x3:(MOVBZload [i+3] {s} p mem))) + s3:(SLDconst [32] x4:(MOVBZload [i+4] {s} p mem))) + s4:(SLDconst [40] x5:(MOVBZload [i+5] {s} p mem))) + s5:(SLDconst [48] x6:(MOVBZload [i+6] {s} p mem))) + s6:(SLDconst [56] x7:(MOVBZload [i+7] {s} p mem))) + && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && x4.Uses == 1 + && x5.Uses == 1 + && x6.Uses == 1 + && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1) != nil + && s2.Uses == 1 + && s3.Uses == 1 + && s4.Uses == 1 + && s5.Uses == 1 + && s6.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && o2.Uses == 1 + && o3.Uses == 1 + && o4.Uses == 1 + && o5.Uses == 1 + && 
mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) + && clobber(x2) + && clobber(x3) + && clobber(x4) + && clobber(x5) + && clobber(x6) + && clobber(x7) && clobber(s0) && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZload [i0] {s} p mem)) y) - -(OR - s0:(SLDconst [j0] x0:(MOVHZload [i0] {s} p mem)) - or:(OR - s1:(SLDconst [j1] x1:(MOVHZload [i1] {s} p mem)) - y)) - && i1 == i0+2 - && j1 == j0-16 - && j1 % 32 == 0 + && clobber(s2) + && clobber(s3) + && clobber(s4) + && clobber(s5) + && clobber(s6) + && clobber(o0) + && clobber(o1) + && clobber(o2) + && clobber(o3) + && clobber(o4) + && clobber(o5) + -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRload [i] {s} p mem) + +// b[0] | b[1]<<8 -> load 16-bit, reverse bytes +(ORW x0:(MOVBZloadidx [i] {s} p idx mem) + s0:(SLWconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 - && s1.Uses == 1 - && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZload [i0] {s} p mem)) y) - -// Big-endian indexed loads - -(ORW x1:(MOVBZloadidx [i1] {s} p idx mem) - sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem))) - && i1 == i0+1 - && p.Op != OpSB - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) + -> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i] {s} p idx mem)) -(OR x1:(MOVBZloadidx [i1] {s} p idx mem) - sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem))) - && i1 == i0+1 - && p.Op != OpSB - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - -(ORW x1:(MOVHZloadidx [i1] {s} p idx mem) - sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem))) - && i1 == i0+2 - && p.Op != OpSB - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - -(OR x1:(MOVHZloadidx [i1] {s} p idx mem) - sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem))) - && i1 == i0+2 - && p.Op != OpSB - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - -(OR x1:(MOVWZloadidx [i1] {s} p idx mem) - sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} p idx mem))) - && i1 == i0+4 - && p.Op != OpSB - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) - -(ORW - s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) - or:(ORW - s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) - y)) - && i1 == i0+1 - && j1 == j0-8 - && j1 % 16 == 0 +// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit, reverse bytes +(ORW o0:(ORW z0:(MOVHZreg x0:(MOVHBRloadidx [i] {s} p idx mem)) + s0:(SLWconst [16] x1:(MOVBZloadidx [i+2] {s} p idx mem))) + s1:(SLWconst [24] x2:(MOVBZloadidx [i+3] {s} p idx mem))) + && z0.Uses == 1 && x0.Uses == 1 && x1.Uses == 1 + && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1) != nil + && o0.Uses == 1 + && 
mergePoint(b,x0,x1,x2) != nil + && clobber(z0) && clobber(x0) && clobber(x1) + && clobber(x2) && clobber(s0) && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - -(OR - s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) - or:(OR - s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) - y)) - && i1 == i0+1 - && j1 == j0-8 - && j1 % 16 == 0 + && clobber(o0) + -> @mergePoint(b,x0,x1,x2) (MOVWZreg (MOVWBRloadidx [i] {s} p idx mem)) + +// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit, reverse bytes +(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR + x0:(MOVBZloadidx [i] {s} p idx mem) + s0:(SLDconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem))) + s1:(SLDconst [16] x2:(MOVBZloadidx [i+2] {s} p idx mem))) + s2:(SLDconst [24] x3:(MOVBZloadidx [i+3] {s} p idx mem))) + s3:(SLDconst [32] x4:(MOVBZloadidx [i+4] {s} p idx mem))) + s4:(SLDconst [40] x5:(MOVBZloadidx [i+5] {s} p idx mem))) + s5:(SLDconst [48] x6:(MOVBZloadidx [i+6] {s} p idx mem))) + s6:(SLDconst [56] x7:(MOVBZloadidx [i+7] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && x4.Uses == 1 + && x5.Uses == 1 + && x6.Uses == 1 + && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1) != nil + && s2.Uses == 1 + && s3.Uses == 1 + && s4.Uses == 1 + && s5.Uses == 1 + && s6.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && o2.Uses == 1 + && o3.Uses == 1 + && o4.Uses == 1 + && o5.Uses == 1 + && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) + && clobber(x2) + && clobber(x3) + && clobber(x4) + && clobber(x5) + && clobber(x6) + && clobber(x7) && clobber(s0) && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - -(OR - s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem)) - or:(OR - s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem)) - y)) - && i1 == i0+2 - && j1 == j0-16 - && j1 % 32 == 0 + && clobber(s2) + && clobber(s3) + && clobber(s4) + && clobber(s5) + && clobber(s6) + && clobber(o0) + && clobber(o1) + && clobber(o2) + && clobber(o3) + && clobber(o4) + && clobber(o5) + -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRloadidx [i] {s} p idx mem) + +// Big endian loads. 
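(Illustrative aside, not part of the patch: the big-endian rules in this section match the same kind of byte assembly but in big-endian order, which on s390x is the machine's native layout, so they reduce to ordinary MOVHZload/MOVWZload/MOVDload instructions. A hypothetical source-level example:)

// Sketch of the source-level pattern the big-endian load rules target.
package main

import "fmt"

// be32 builds a big-endian 32-bit value byte by byte; on s390x the rules
// above can replace the ORs and shifts with a single 32-bit load.
func be32(b []byte) uint32 {
	return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
}

func main() {
	fmt.Printf("%#x\n", be32([]byte{0x12, 0x34, 0x56, 0x78})) // 0x12345678
}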
+ +// b[1] | b[0]<<8 -> load 16-bit +(ORW x0:(MOVBZload [i] {s} p mem) + s0:(SLWconst [8] x1:(MOVBZload [i-1] {s} p mem))) + && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 - && s1.Uses == 1 - && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - -// Little-endian loads - -(ORW x0:(MOVBZload [i0] {s} p mem) - sh:(SLWconst [8] x1:(MOVBZload [i1] {s} p mem))) - && i1 == i0+1 - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem)) + -> @mergePoint(b,x0,x1) (MOVHZload [i-1] {s} p mem) -(OR x0:(MOVBZload [i0] {s} p mem) - sh:(SLDconst [8] x1:(MOVBZload [i1] {s} p mem))) - && i1 == i0+1 - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem)) - -(ORW r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem)) - sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem)))) - && i1 == i0+2 - && x0.Uses == 1 - && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(r0) - && clobber(r1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVWBRload [i0] {s} p mem) - -(OR r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem)) - sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem)))) - && i1 == i0+2 - && x0.Uses == 1 - && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(r0) - && clobber(r1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRload [i0] {s} p mem)) - -(OR r0:(MOVWZreg x0:(MOVWBRload [i0] {s} p mem)) - sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRload [i1] {s} p mem)))) - && i1 == i0+4 - && x0.Uses == 1 - && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(r0) - && clobber(r1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVDBRload [i0] {s} p mem) - -(ORW - s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem)) - or:(ORW - s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem)) - y)) - && i1 == i0+1 - && j1 == j0+8 - && j0 % 16 == 0 +// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit +(ORW o0:(ORW x0:(MOVHZload [i] {s} p mem) + s0:(SLWconst [16] x1:(MOVBZload [i-1] {s} p mem))) + s1:(SLWconst [24] x2:(MOVBZload [i-2] {s} p mem))) + && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 + && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1) != nil + && o0.Uses == 1 + && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) + && clobber(x2) && clobber(s0) && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) - -(OR - s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem)) - or:(OR - s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem)) - y)) - && i1 == i0+1 - && j1 == j0+8 - && j0 % 16 == 0 + && clobber(o0) + -> @mergePoint(b,x0,x1,x2) (MOVWZload [i-2] {s} p mem) + +// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit +(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR + x0:(MOVBZload [i] {s} p mem) + s0:(SLDconst [8] x1:(MOVBZload [i-1] {s} p mem))) 
+ s1:(SLDconst [16] x2:(MOVBZload [i-2] {s} p mem))) + s2:(SLDconst [24] x3:(MOVBZload [i-3] {s} p mem))) + s3:(SLDconst [32] x4:(MOVBZload [i-4] {s} p mem))) + s4:(SLDconst [40] x5:(MOVBZload [i-5] {s} p mem))) + s5:(SLDconst [48] x6:(MOVBZload [i-6] {s} p mem))) + s6:(SLDconst [56] x7:(MOVBZload [i-7] {s} p mem))) + && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && x4.Uses == 1 + && x5.Uses == 1 + && x6.Uses == 1 + && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1) != nil + && s2.Uses == 1 + && s3.Uses == 1 + && s4.Uses == 1 + && s5.Uses == 1 + && s6.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && o2.Uses == 1 + && o3.Uses == 1 + && o4.Uses == 1 + && o5.Uses == 1 + && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) + && clobber(x2) + && clobber(x3) + && clobber(x4) + && clobber(x5) + && clobber(x6) + && clobber(x7) && clobber(s0) && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) - -(OR - s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))) - or:(OR - s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))) - y)) - && i1 == i0+2 - && j1 == j0+16 - && j0 % 32 == 0 + && clobber(s2) + && clobber(s3) + && clobber(s4) + && clobber(s5) + && clobber(s6) + && clobber(o0) + && clobber(o1) + && clobber(o2) + && clobber(o3) + && clobber(o4) + && clobber(o5) + -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload [i-7] {s} p mem) + +// b[1] | b[0]<<8 -> load 16-bit +(ORW x0:(MOVBZloadidx [i] {s} p idx mem) + s0:(SLWconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 && s0.Uses == 1 - && s1.Uses == 1 - && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) - && clobber(r0) - && clobber(r1) && clobber(s0) - && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRload [i0] {s} p mem))) y) - -// Little-endian indexed loads - -(ORW x0:(MOVBZloadidx [i0] {s} p idx mem) - sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem))) - && i1 == i0+1 - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) + -> @mergePoint(b,x0,x1) (MOVHZloadidx [i-1] {s} p idx mem) -(OR x0:(MOVBZloadidx [i0] {s} p idx mem) - sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem))) - && i1 == i0+1 - && x0.Uses == 1 - && x1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - -(ORW r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)) - sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - && i1 == i0+2 - && x0.Uses == 1 - && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(r0) - && clobber(r1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) - -(OR r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)) - sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - && i1 == i0+2 - && x0.Uses == 1 - && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(r0) - && clobber(r1) - && clobber(sh) - -> 
@mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) - -(OR r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} p idx mem)) - sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} p idx mem)))) - && i1 == i0+4 - && x0.Uses == 1 - && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 - && sh.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(r0) - && clobber(r1) - && clobber(sh) - -> @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) - -(ORW - s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) - or:(ORW - s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) - y)) - && i1 == i0+1 - && j1 == j0+8 - && j0 % 16 == 0 - && x0.Uses == 1 - && x1.Uses == 1 - && s0.Uses == 1 - && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1) != nil - && clobber(x0) - && clobber(x1) - && clobber(s0) - && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - -(OR - s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) - or:(OR - s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) - y)) - && i1 == i0+1 - && j1 == j0+8 - && j0 % 16 == 0 +// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 -> load 32-bit +(ORW o0:(ORW x0:(MOVHZloadidx [i] {s} p idx mem) + s0:(SLWconst [16] x1:(MOVBZloadidx [i-1] {s} p idx mem))) + s1:(SLWconst [24] x2:(MOVBZloadidx [i-2] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 + && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1) != nil + && o0.Uses == 1 + && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) + && clobber(x2) && clobber(s0) && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - -(OR - s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) - or:(OR - s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))) - y)) - && i1 == i0+2 - && j1 == j0+16 - && j0 % 32 == 0 + && clobber(o0) + -> @mergePoint(b,x0,x1,x2) (MOVWZloadidx [i-2] {s} p idx mem) + +// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 -> load 64-bit +(OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR + x0:(MOVBZloadidx [i] {s} p idx mem) + s0:(SLDconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem))) + s1:(SLDconst [16] x2:(MOVBZloadidx [i-2] {s} p idx mem))) + s2:(SLDconst [24] x3:(MOVBZloadidx [i-3] {s} p idx mem))) + s3:(SLDconst [32] x4:(MOVBZloadidx [i-4] {s} p idx mem))) + s4:(SLDconst [40] x5:(MOVBZloadidx [i-5] {s} p idx mem))) + s5:(SLDconst [48] x6:(MOVBZloadidx [i-6] {s} p idx mem))) + s6:(SLDconst [56] x7:(MOVBZloadidx [i-7] {s} p idx mem))) && x0.Uses == 1 && x1.Uses == 1 - && r0.Uses == 1 - && r1.Uses == 1 + && x2.Uses == 1 + && x3.Uses == 1 + && x4.Uses == 1 + && x5.Uses == 1 + && x6.Uses == 1 + && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 - && or.Uses == 1 - && mergePoint(b,x0,x1) != nil + && s2.Uses == 1 + && s3.Uses == 1 + && s4.Uses == 1 + && s5.Uses == 1 + && s6.Uses == 1 + && o0.Uses == 1 + && o1.Uses == 1 + && o2.Uses == 1 + && o3.Uses == 1 + && o4.Uses == 1 + && o5.Uses == 1 + && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) - && clobber(r0) - && clobber(r1) + && clobber(x2) + && clobber(x3) + && clobber(x4) + && clobber(x5) + && clobber(x6) + && clobber(x7) && clobber(s0) && clobber(s1) - && clobber(or) - -> @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) + && clobber(s2) + && clobber(s3) + && clobber(s4) + && clobber(s5) + 
&& clobber(s6) + && clobber(o0) + && clobber(o1) + && clobber(o2) + && clobber(o3) + && clobber(o4) + && clobber(o5) + -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx [i-7] {s} p idx mem) // Combine stores into store multiples. // 32-bit diff --git a/src/cmd/compile/internal/ssa/gen/S390XOps.go b/src/cmd/compile/internal/ssa/gen/S390XOps.go index 07e7dd24c8..7765d9792f 100644 --- a/src/cmd/compile/internal/ssa/gen/S390XOps.go +++ b/src/cmd/compile/internal/ssa/gen/S390XOps.go @@ -216,8 +216,8 @@ func init() { {name: "MULLDload", argLength: 3, reg: gpopload, asm: "MULLD", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * *arg1. arg2=mem {name: "MULLWload", argLength: 3, reg: gpopload, asm: "MULLW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * *arg1. arg2=mem - {name: "MULHD", argLength: 2, reg: gp21, asm: "MULHD", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width - {name: "MULHDU", argLength: 2, reg: gp21, asm: "MULHDU", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width + {name: "MULHD", argLength: 2, reg: gp21, asm: "MULHD", typ: "Int64", resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width + {name: "MULHDU", argLength: 2, reg: gp21, asm: "MULHDU", typ: "Int64", resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width {name: "DIVD", argLength: 2, reg: gp21, asm: "DIVD", resultInArg0: true, clobberFlags: true}, // arg0 / arg1 {name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", resultInArg0: true, clobberFlags: true}, // arg0 / arg1 @@ -265,24 +265,24 @@ func init() { {name: "FCMPS", argLength: 2, reg: fp2flags, asm: "CEBR", typ: "Flags"}, // arg0 compare to arg1, f32 {name: "FCMP", argLength: 2, reg: fp2flags, asm: "FCMPU", typ: "Flags"}, // arg0 compare to arg1, f64 - {name: "SLD", argLength: 2, reg: sh21, asm: "SLD"}, // arg0 << arg1, shift amount is mod 64 - {name: "SLW", argLength: 2, reg: sh21, asm: "SLW"}, // arg0 << arg1, shift amount is mod 32 - {name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "Int8"}, // arg0 << auxint, shift amount 0-63 - {name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "Int8"}, // arg0 << auxint, shift amount 0-31 + {name: "SLD", argLength: 2, reg: sh21, asm: "SLD"}, // arg0 << arg1, shift amount is mod 64 + {name: "SLW", argLength: 2, reg: sh21, asm: "SLW"}, // arg0 << arg1, shift amount is mod 32 + {name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "Int64"}, // arg0 << auxint, shift amount 0-63 + {name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "Int32"}, // arg0 << auxint, shift amount 0-31 - {name: "SRD", argLength: 2, reg: sh21, asm: "SRD"}, // unsigned arg0 >> arg1, shift amount is mod 64 - {name: "SRW", argLength: 2, reg: sh21, asm: "SRW"}, // unsigned arg0 >> arg1, shift amount is mod 32 - {name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "Int8"}, // unsigned arg0 >> auxint, shift amount 0-63 - {name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "Int8"}, // unsigned arg0 >> auxint, shift amount 0-31 + {name: "SRD", argLength: 2, reg: sh21, asm: "SRD"}, // unsigned arg0 >> arg1, shift amount is mod 64 + {name: "SRW", argLength: 2, reg: sh21, asm: "SRW"}, // unsigned arg0 >> arg1, shift amount is mod 32 + {name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "Int64"}, // unsigned arg0 >> auxint, shift amount 0-63 + {name: "SRWconst", 
argLength: 1, reg: gp11, asm: "SRW", aux: "Int32"}, // unsigned arg0 >> auxint, shift amount 0-31 // Arithmetic shifts clobber flags. - {name: "SRAD", argLength: 2, reg: sh21, asm: "SRAD", clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64 - {name: "SRAW", argLength: 2, reg: sh21, asm: "SRAW", clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32 - {name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int8", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63 - {name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "Int8", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31 + {name: "SRAD", argLength: 2, reg: sh21, asm: "SRAD", clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64 + {name: "SRAW", argLength: 2, reg: sh21, asm: "SRAW", clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32 + {name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int64", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63 + {name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "Int32", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31 - {name: "RLLGconst", argLength: 1, reg: gp11, asm: "RLLG", aux: "Int8"}, // arg0 rotate left auxint, rotate amount 0-63 - {name: "RLLconst", argLength: 1, reg: gp11, asm: "RLL", aux: "Int8"}, // arg0 rotate left auxint, rotate amount 0-31 + {name: "RLLGconst", argLength: 1, reg: gp11, asm: "RLLG", aux: "Int64"}, // arg0 rotate left auxint, rotate amount 0-63 + {name: "RLLconst", argLength: 1, reg: gp11, asm: "RLL", aux: "Int32"}, // arg0 rotate left auxint, rotate amount 0-31 // unary ops {name: "NEG", argLength: 1, reg: gp11, asm: "NEG", clobberFlags: true}, // -arg0 @@ -364,20 +364,20 @@ func init() { // indexed loads/stores // TODO(mundaym): add sign-extended indexed loads - {name: "MOVBZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", clobberFlags: true, symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem - {name: "MOVHZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", clobberFlags: true, symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem - {name: "MOVWZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", clobberFlags: true, symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem - {name: "MOVDloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVD", aux: "SymOff", typ: "UInt64", clobberFlags: true, symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem - {name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVHBR", aux: "SymOff", typ: "Int16", clobberFlags: true, symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes. - {name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWBR", aux: "SymOff", typ: "Int32", clobberFlags: true, symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes. - {name: "MOVDBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVDBR", aux: "SymOff", typ: "Int64", clobberFlags: true, symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes. 
- {name: "MOVBstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVB", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem - {name: "MOVHstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVH", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem - {name: "MOVWstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVW", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem - {name: "MOVDstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVD", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem - {name: "MOVHBRstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVHBR", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes. - {name: "MOVWBRstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVWBR", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes. - {name: "MOVDBRstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVDBR", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes. + {name: "MOVBZloadidx", argLength: 3, reg: gploadidx, asm: "MOVBZ", aux: "SymOff", clobberFlags: true, symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVHZloadidx", argLength: 3, reg: gploadidx, asm: "MOVHZ", aux: "SymOff", clobberFlags: true, symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVWZloadidx", argLength: 3, reg: gploadidx, asm: "MOVWZ", aux: "SymOff", clobberFlags: true, symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVDloadidx", argLength: 3, reg: gploadidx, asm: "MOVD", aux: "SymOff", clobberFlags: true, symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVHBR", aux: "SymOff", clobberFlags: true, symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes. + {name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVWBR", aux: "SymOff", clobberFlags: true, symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes. + {name: "MOVDBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVDBR", aux: "SymOff", clobberFlags: true, symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes. + {name: "MOVBstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVB", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVHstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVH", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVWstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVDstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVD", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. 
arg3=mem + {name: "MOVHBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVHBR", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes. + {name: "MOVWBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVWBR", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes. + {name: "MOVDBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVDBR", aux: "SymOff", clobberFlags: true, symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes. // For storeconst ops, the AuxInt field encodes both // the value to store and an address offset of the store. diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules index 95d5c7766e..86d0fcab32 100644 --- a/src/cmd/compile/internal/ssa/gen/generic.rules +++ b/src/cmd/compile/internal/ssa/gen/generic.rules @@ -244,12 +244,48 @@ (Neq16 (Const16 [c]) (Add16 (Const16 [d]) x)) -> (Neq16 (Const16 [int64(int16(c-d))]) x) (Neq8 (Const8 [c]) (Add8 (Const8 [d]) x)) -> (Neq8 (Const8 [int64(int8(c-d))]) x) -// Canonicalize x-const to x+(-const) +// canonicalize: swap arguments for commutative operations when one argument is a constant. +(Eq64 x (Const64 [c])) && x.Op != OpConst64 -> (Eq64 (Const64 [c]) x) +(Eq32 x (Const32 [c])) && x.Op != OpConst32 -> (Eq32 (Const32 [c]) x) +(Eq16 x (Const16 [c])) && x.Op != OpConst16 -> (Eq16 (Const16 [c]) x) +(Eq8 x (Const8 [c])) && x.Op != OpConst8 -> (Eq8 (Const8 [c]) x) + +(Neq64 x (Const64 [c])) && x.Op != OpConst64 -> (Neq64 (Const64 [c]) x) +(Neq32 x (Const32 [c])) && x.Op != OpConst32 -> (Neq32 (Const32 [c]) x) +(Neq16 x (Const16 [c])) && x.Op != OpConst16 -> (Neq16 (Const16 [c]) x) +(Neq8 x (Const8 [c])) && x.Op != OpConst8 -> (Neq8 (Const8 [c]) x) + +// AddPtr is not canonicalized because nilcheck ptr checks the first argument to be non-nil. 
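// rulegen turns the Add/Mul/And/Or/Xor swap rules that follow into matchers
// shaped like the generated rewrite*.go code shown elsewhere in this patch.
// As a hand-written sketch (mine, not generator output; the function name is
// made up), here is roughly what one such rule,
//   (Add64 x (Const64 [c])) && x.Op != OpConst64 -> (Add64 (Const64 [c]) x),
// boils down to inside the ssa package:
func rewriteAdd64ConstRight(v *Value) bool {
	x := v.Args[0]
	y := v.Args[1]
	if y.Op != OpConst64 || x.Op == OpConst64 {
		// No constant on the right, or both arguments constant: no change.
		return false
	}
	// Rebuild the Add64 with the constant first, using the same
	// v.reset/v.AddArg idiom as the generated rewrite functions.
	v.reset(OpAdd64)
	v.AddArg(y)
	v.AddArg(x)
	return true
}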
+(Add64 x (Const64 [c])) && x.Op != OpConst64 -> (Add64 (Const64 [c]) x) +(Add32 x (Const32 [c])) && x.Op != OpConst32 -> (Add32 (Const32 [c]) x) +(Add16 x (Const16 [c])) && x.Op != OpConst16 -> (Add16 (Const16 [c]) x) +(Add8 x (Const8 [c])) && x.Op != OpConst8 -> (Add8 (Const8 [c]) x) + +(Mul64 x (Const64 [c])) && x.Op != OpConst64 -> (Mul64 (Const64 [c]) x) +(Mul32 x (Const32 [c])) && x.Op != OpConst32 -> (Mul32 (Const32 [c]) x) +(Mul16 x (Const16 [c])) && x.Op != OpConst16 -> (Mul16 (Const16 [c]) x) +(Mul8 x (Const8 [c])) && x.Op != OpConst8 -> (Mul8 (Const8 [c]) x) + (Sub64 x (Const64 [c])) && x.Op != OpConst64 -> (Add64 (Const64 [-c]) x) (Sub32 x (Const32 [c])) && x.Op != OpConst32 -> (Add32 (Const32 [int64(int32(-c))]) x) (Sub16 x (Const16 [c])) && x.Op != OpConst16 -> (Add16 (Const16 [int64(int16(-c))]) x) (Sub8 x (Const8 [c])) && x.Op != OpConst8 -> (Add8 (Const8 [int64(int8(-c))]) x) +(And64 x (Const64 [c])) && x.Op != OpConst64 -> (And64 (Const64 [c]) x) +(And32 x (Const32 [c])) && x.Op != OpConst32 -> (And32 (Const32 [c]) x) +(And16 x (Const16 [c])) && x.Op != OpConst16 -> (And16 (Const16 [c]) x) +(And8 x (Const8 [c])) && x.Op != OpConst8 -> (And8 (Const8 [c]) x) + +(Or64 x (Const64 [c])) && x.Op != OpConst64 -> (Or64 (Const64 [c]) x) +(Or32 x (Const32 [c])) && x.Op != OpConst32 -> (Or32 (Const32 [c]) x) +(Or16 x (Const16 [c])) && x.Op != OpConst16 -> (Or16 (Const16 [c]) x) +(Or8 x (Const8 [c])) && x.Op != OpConst8 -> (Or8 (Const8 [c]) x) + +(Xor64 x (Const64 [c])) && x.Op != OpConst64 -> (Xor64 (Const64 [c]) x) +(Xor32 x (Const32 [c])) && x.Op != OpConst32 -> (Xor32 (Const32 [c]) x) +(Xor16 x (Const16 [c])) && x.Op != OpConst16 -> (Xor16 (Const16 [c]) x) +(Xor8 x (Const8 [c])) && x.Op != OpConst8 -> (Xor8 (Const8 [c]) x) + // fold negation into comparison operators (Not (Eq64 x y)) -> (Neq64 x y) (Not (Eq32 x y)) -> (Neq32 x y) @@ -599,14 +635,50 @@ (And32 x (And32 x y)) -> (And32 x y) (And16 x (And16 x y)) -> (And16 x y) (And8 x (And8 x y)) -> (And8 x y) +(And64 x (And64 y x)) -> (And64 x y) +(And32 x (And32 y x)) -> (And32 x y) +(And16 x (And16 y x)) -> (And16 x y) +(And8 x (And8 y x)) -> (And8 x y) +(And64 (And64 x y) x) -> (And64 x y) +(And32 (And32 x y) x) -> (And32 x y) +(And16 (And16 x y) x) -> (And16 x y) +(And8 (And8 x y) x) -> (And8 x y) +(And64 (And64 x y) y) -> (And64 x y) +(And32 (And32 x y) y) -> (And32 x y) +(And16 (And16 x y) y) -> (And16 x y) +(And8 (And8 x y) y) -> (And8 x y) (Or64 x (Or64 x y)) -> (Or64 x y) (Or32 x (Or32 x y)) -> (Or32 x y) (Or16 x (Or16 x y)) -> (Or16 x y) (Or8 x (Or8 x y)) -> (Or8 x y) +(Or64 x (Or64 y x)) -> (Or64 x y) +(Or32 x (Or32 y x)) -> (Or32 x y) +(Or16 x (Or16 y x)) -> (Or16 x y) +(Or8 x (Or8 y x)) -> (Or8 x y) +(Or64 (Or64 x y) x) -> (Or64 x y) +(Or32 (Or32 x y) x) -> (Or32 x y) +(Or16 (Or16 x y) x) -> (Or16 x y) +(Or8 (Or8 x y) x) -> (Or8 x y) +(Or64 (Or64 x y) y) -> (Or64 x y) +(Or32 (Or32 x y) y) -> (Or32 x y) +(Or16 (Or16 x y) y) -> (Or16 x y) +(Or8 (Or8 x y) y) -> (Or8 x y) (Xor64 x (Xor64 x y)) -> y (Xor32 x (Xor32 x y)) -> y (Xor16 x (Xor16 x y)) -> y (Xor8 x (Xor8 x y)) -> y +(Xor64 x (Xor64 y x)) -> y +(Xor32 x (Xor32 y x)) -> y +(Xor16 x (Xor16 y x)) -> y +(Xor8 x (Xor8 y x)) -> y +(Xor64 (Xor64 x y) x) -> y +(Xor32 (Xor32 x y) x) -> y +(Xor16 (Xor16 x y) x) -> y +(Xor8 (Xor8 x y) x) -> y +(Xor64 (Xor64 x y) y) -> x +(Xor32 (Xor32 x y) y) -> x +(Xor16 (Xor16 x y) y) -> x +(Xor8 (Xor8 x y) y) -> x (Trunc64to8 (And64 (Const64 [y]) x)) && y&0xFF == 0xFF -> (Trunc64to8 x) (Trunc64to16 (And64 (Const64 [y]) x)) && y&0xFFFF == 
0xFFFF -> (Trunc64to16 x) @@ -660,7 +732,9 @@ // user nil checks (NeqPtr p (ConstNil)) -> (IsNonNil p) +(NeqPtr (ConstNil) p) -> (IsNonNil p) (EqPtr p (ConstNil)) -> (Not (IsNonNil p)) +(EqPtr (ConstNil) p) -> (Not (IsNonNil p)) (IsNonNil (ConstNil)) -> (ConstBool [0]) // slice and interface comparisons @@ -838,6 +912,7 @@ // Get rid of Convert ops for pointer arithmetic on unsafe.Pointer. (Convert (Add64 (Convert ptr mem) off) mem) -> (Add64 ptr off) +(Convert (Add64 off (Convert ptr mem)) mem) -> (Add64 ptr off) (Convert (Convert ptr mem) mem) -> ptr // Decompose compound argument values @@ -1163,11 +1238,35 @@ // Reassociate expressions involving // constants such that constants come first, // exposing obvious constant-folding opportunities. -// Reassociate (op (op y C) x) to (op C (op x y)) or similar, where C +// First, re-write (op x (op y z)) to (op (op y z) x) if +// the op is commutative, to reduce the number of subsequent +// matching rules for folding. Then, reassociate +// (op (op y C) x) to (op C (op x y)) or similar, where C // is constant, which pushes constants to the outside // of the expression. At that point, any constant-folding // opportunities should be obvious. +(Add64 x l:(Add64 _ _)) && (x.Op != OpAdd64 && x.Op != OpConst64) -> (Add64 l x) +(Add32 x l:(Add32 _ _)) && (x.Op != OpAdd32 && x.Op != OpConst32) -> (Add32 l x) +(Add16 x l:(Add16 _ _)) && (x.Op != OpAdd16 && x.Op != OpConst16) -> (Add16 l x) +(Add8 x l:(Add8 _ _)) && (x.Op != OpAdd8 && x.Op != OpConst8) -> (Add8 l x) +(And64 x l:(And64 _ _)) && (x.Op != OpAnd64 && x.Op != OpConst64) -> (And64 l x) +(And32 x l:(And32 _ _)) && (x.Op != OpAnd32 && x.Op != OpConst32) -> (And32 l x) +(And16 x l:(And16 _ _)) && (x.Op != OpAnd16 && x.Op != OpConst16) -> (And16 l x) +(And8 x l:(And8 _ _)) && (x.Op != OpAnd8 && x.Op != OpConst8) -> (And8 l x) +(Or64 x l:(Or64 _ _)) && (x.Op != OpOr64 && x.Op != OpConst64) -> (Or64 l x) +(Or32 x l:(Or32 _ _)) && (x.Op != OpOr32 && x.Op != OpConst32) -> (Or32 l x) +(Or16 x l:(Or16 _ _)) && (x.Op != OpOr16 && x.Op != OpConst16) -> (Or16 l x) +(Or8 x l:(Or8 _ _)) && (x.Op != OpOr8 && x.Op != OpConst8) -> (Or8 l x) +(Xor64 x l:(Xor64 _ _)) && (x.Op != OpXor64 && x.Op != OpConst64) -> (Xor64 l x) +(Xor32 x l:(Xor32 _ _)) && (x.Op != OpXor32 && x.Op != OpConst32) -> (Xor32 l x) +(Xor16 x l:(Xor16 _ _)) && (x.Op != OpXor16 && x.Op != OpConst16) -> (Xor16 l x) +(Xor8 x l:(Xor8 _ _)) && (x.Op != OpXor8 && x.Op != OpConst8) -> (Xor8 l x) +(Mul64 x l:(Mul64 _ _)) && (x.Op != OpMul64 && x.Op != OpConst64) -> (Mul64 l x) +(Mul32 x l:(Mul32 _ _)) && (x.Op != OpMul32 && x.Op != OpConst32) -> (Mul32 l x) +(Mul16 x l:(Mul16 _ _)) && (x.Op != OpMul16 && x.Op != OpConst16) -> (Mul16 l x) +(Mul8 x l:(Mul8 _ _)) && (x.Op != OpMul8 && x.Op != OpConst8) -> (Mul8 l x) + // x + (C + z) -> C + (x + z) (Add64 (Add64 i:(Const64 ) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) -> (Add64 i (Add64 z x)) (Add32 (Add32 i:(Const32 ) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) -> (Add32 i (Add32 z x)) @@ -1280,13 +1379,19 @@ // floating point optimizations (Add32F x (Const32F [0])) -> x +(Add32F (Const32F [0]) x) -> x (Add64F x (Const64F [0])) -> x +(Add64F (Const64F [0]) x) -> x (Sub32F x (Const32F [0])) -> x (Sub64F x (Const64F [0])) -> x (Mul32F x (Const32F [f2i(1)])) -> x +(Mul32F (Const32F [f2i(1)]) x) -> x (Mul64F x (Const64F [f2i(1)])) -> x +(Mul64F (Const64F [f2i(1)]) x) -> x (Mul32F x (Const32F [f2i(-1)])) -> (Neg32F x) +(Mul32F (Const32F [f2i(-1)]) x) -> (Neg32F x) (Mul64F x (Const64F [f2i(-1)])) 
-> (Neg64F x) +(Mul64F (Const64F [f2i(-1)]) x) -> (Neg64F x) (Div32F x (Const32F [f2i(1)])) -> x (Div64F x (Const64F [f2i(1)])) -> x (Div32F x (Const32F [f2i(-1)])) -> (Neg32F x) diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go index 7991f32679..a5ac62829e 100644 --- a/src/cmd/compile/internal/ssa/gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/gen/genericOps.go @@ -28,8 +28,8 @@ var genericOps = []opData{ {name: "Add32", argLength: 2, commutative: true}, {name: "Add64", argLength: 2, commutative: true}, {name: "AddPtr", argLength: 2}, // For address calculations. arg0 is a pointer and arg1 is an int. - {name: "Add32F", argLength: 2, commutative: true}, - {name: "Add64F", argLength: 2, commutative: true}, + {name: "Add32F", argLength: 2}, + {name: "Add64F", argLength: 2}, {name: "Sub8", argLength: 2}, // arg0 - arg1 {name: "Sub16", argLength: 2}, @@ -43,25 +43,24 @@ var genericOps = []opData{ {name: "Mul16", argLength: 2, commutative: true}, {name: "Mul32", argLength: 2, commutative: true}, {name: "Mul64", argLength: 2, commutative: true}, - {name: "Mul32F", argLength: 2, commutative: true}, - {name: "Mul64F", argLength: 2, commutative: true}, + {name: "Mul32F", argLength: 2}, + {name: "Mul64F", argLength: 2}, {name: "Div32F", argLength: 2}, // arg0 / arg1 {name: "Div64F", argLength: 2}, - {name: "Hmul32", argLength: 2, commutative: true}, - {name: "Hmul32u", argLength: 2, commutative: true}, - {name: "Hmul64", argLength: 2, commutative: true}, - {name: "Hmul64u", argLength: 2, commutative: true}, + {name: "Hmul32", argLength: 2}, + {name: "Hmul32u", argLength: 2}, + {name: "Hmul64", argLength: 2}, + {name: "Hmul64u", argLength: 2}, - {name: "Mul32uhilo", argLength: 2, typ: "(UInt32,UInt32)", commutative: true}, // arg0 * arg1, returns (hi, lo) - {name: "Mul64uhilo", argLength: 2, typ: "(UInt64,UInt64)", commutative: true}, // arg0 * arg1, returns (hi, lo) + {name: "Mul32uhilo", argLength: 2, typ: "(UInt32,UInt32)"}, // arg0 * arg1, returns (hi, lo) + {name: "Mul64uhilo", argLength: 2, typ: "(UInt64,UInt64)"}, // arg0 * arg1, returns (hi, lo) // Weird special instructions for use in the strength reduction of divides. // These ops compute unsigned (arg0 + arg1) / 2, correct to all // 32/64 bits, even when the intermediate result of the add has 33/65 bits. // These ops can assume arg0 >= arg1. - // Note: these ops aren't commutative! 
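// Illustration (not from this patch) of the contract documented above for
// Avg32u/Avg64u: with the stated assumption arg0 >= arg1, the average can be
// computed without ever forming the 33/65-bit sum. The helper name avg64u is
// made up for this sketch; the real lowering is architecture-specific.
func avg64u(a, b uint64) uint64 {
	// a >= b, so a-b cannot wrap; adding half the difference back onto b
	// gives (a+b)/2 rounded down, with every intermediate fitting in 64 bits.
	return b + (a-b)/2
}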
{name: "Avg32u", argLength: 2, typ: "UInt32"}, // 32-bit platforms only {name: "Avg64u", argLength: 2, typ: "UInt64"}, // 64-bit platforms only @@ -160,8 +159,8 @@ var genericOps = []opData{ {name: "EqPtr", argLength: 2, commutative: true, typ: "Bool"}, {name: "EqInter", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend {name: "EqSlice", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend - {name: "Eq32F", argLength: 2, commutative: true, typ: "Bool"}, - {name: "Eq64F", argLength: 2, commutative: true, typ: "Bool"}, + {name: "Eq32F", argLength: 2, typ: "Bool"}, + {name: "Eq64F", argLength: 2, typ: "Bool"}, {name: "Neq8", argLength: 2, commutative: true, typ: "Bool"}, // arg0 != arg1 {name: "Neq16", argLength: 2, commutative: true, typ: "Bool"}, @@ -170,8 +169,8 @@ var genericOps = []opData{ {name: "NeqPtr", argLength: 2, commutative: true, typ: "Bool"}, {name: "NeqInter", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend {name: "NeqSlice", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend - {name: "Neq32F", argLength: 2, commutative: true, typ: "Bool"}, - {name: "Neq64F", argLength: 2, commutative: true, typ: "Bool"}, + {name: "Neq32F", argLength: 2, typ: "Bool"}, + {name: "Neq64F", argLength: 2}, {name: "Less8", argLength: 2, typ: "Bool"}, // arg0 < arg1, signed {name: "Less8U", argLength: 2, typ: "Bool"}, // arg0 < arg1, unsigned diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go index 6924bbca95..beabca97d0 100644 --- a/src/cmd/compile/internal/ssa/gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/gen/rulegen.go @@ -30,7 +30,7 @@ import ( // sexpr [&& extra conditions] -> [@block] sexpr // // sexpr are s-expressions (lisp-like parenthesized groupings) -// sexpr ::= [variable:](opcode sexpr*) +// sexpr ::= (opcode sexpr*) // | variable // | // | [auxint] @@ -39,7 +39,7 @@ import ( // aux ::= variable | {code} // type ::= variable | {code} // variable ::= some token -// opcode ::= one of the opcodes from the *Ops.go files +// opcode ::= one of the opcodes from ../op.go (without the Op prefix) // extra conditions is just a chunk of Go that evaluates to a boolean. It may use // variables declared in the matching sexpr. The variable "v" is predefined to be @@ -119,17 +119,15 @@ func genRules(arch arch) { } loc := fmt.Sprintf("%s.rules:%d", arch.name, ruleLineno) - for _, crule := range commute(rule, arch) { - r := Rule{rule: crule, loc: loc} - if rawop := strings.Split(crule, " ")[0][1:]; isBlock(rawop, arch) { - blockrules[rawop] = append(blockrules[rawop], r) - } else { - // Do fancier value op matching. - match, _, _ := r.parse() - op, oparch, _, _, _, _ := parseValue(match, arch, loc) - opname := fmt.Sprintf("Op%s%s", oparch, op.name) - oprules[opname] = append(oprules[opname], r) - } + r := Rule{rule: rule, loc: loc} + if rawop := strings.Split(rule, " ")[0][1:]; isBlock(rawop, arch) { + blockrules[rawop] = append(blockrules[rawop], r) + } else { + // Do fancier value op matching. + match, _, _ := r.parse() + op, oparch, _, _, _, _ := parseValue(match, arch, loc) + opname := fmt.Sprintf("Op%s%s", oparch, op.name) + oprules[opname] = append(oprules[opname], r) } rule = "" ruleLineno = 0 @@ -754,169 +752,3 @@ func isVariable(s string) bool { } return b } - -// commute returns all equivalent rules to r after applying all possible -// argument swaps to the commutable ops in r. 
-// Potentially exponential, be careful. -func commute(r string, arch arch) []string { - match, cond, result := Rule{rule: r}.parse() - a := commute1(match, varCount(match), arch) - for i, m := range a { - if cond != "" { - m += " && " + cond - } - m += " -> " + result - a[i] = m - } - if len(a) == 1 && normalizeWhitespace(r) != normalizeWhitespace(a[0]) { - fmt.Println(normalizeWhitespace(r)) - fmt.Println(normalizeWhitespace(a[0])) - panic("commute() is not the identity for noncommuting rule") - } - if false && len(a) > 1 { - fmt.Println(r) - for _, x := range a { - fmt.Println(" " + x) - } - } - return a -} - -func commute1(m string, cnt map[string]int, arch arch) []string { - if m[0] == '<' || m[0] == '[' || m[0] == '{' || isVariable(m) { - return []string{m} - } - // Split up input. - var prefix string - colon := strings.Index(m, ":") - if colon >= 0 && isVariable(m[:colon]) { - prefix = m[:colon+1] - m = m[colon+1:] - } - if m[0] != '(' || m[len(m)-1] != ')' { - panic("non-compound expr in commute1: " + m) - } - s := split(m[1 : len(m)-1]) - op := s[0] - - // Figure out if the op is commutative or not. - commutative := false - for _, x := range genericOps { - if op == x.name { - if x.commutative { - commutative = true - } - break - } - } - if arch.name != "generic" { - for _, x := range arch.ops { - if op == x.name { - if x.commutative { - commutative = true - } - break - } - } - } - var idx0, idx1 int - if commutative { - // Find indexes of two args we can swap. - for i, arg := range s { - if i == 0 || arg[0] == '<' || arg[0] == '[' || arg[0] == '{' { - continue - } - if idx0 == 0 { - idx0 = i - continue - } - if idx1 == 0 { - idx1 = i - break - } - } - if idx1 == 0 { - panic("couldn't find first two args of commutative op " + s[0]) - } - if cnt[s[idx0]] == 1 && cnt[s[idx1]] == 1 || s[idx0] == s[idx1] && cnt[s[idx0]] == 2 { - // When we have (Add x y) with no ther uses of x and y in the matching rule, - // then we can skip the commutative match (Add y x). - commutative = false - } - } - - // Recursively commute arguments. - a := make([][]string, len(s)) - for i, arg := range s { - a[i] = commute1(arg, cnt, arch) - } - - // Choose all possibilities from all args. - r := crossProduct(a) - - // If commutative, do that again with its two args reversed. - if commutative { - a[idx0], a[idx1] = a[idx1], a[idx0] - r = append(r, crossProduct(a)...) - } - - // Construct result. - for i, x := range r { - r[i] = prefix + "(" + x + ")" - } - return r -} - -// varCount returns a map which counts the number of occurrences of -// Value variables in m. -func varCount(m string) map[string]int { - cnt := map[string]int{} - varCount1(m, cnt) - return cnt -} -func varCount1(m string, cnt map[string]int) { - if m[0] == '<' || m[0] == '[' || m[0] == '{' { - return - } - if isVariable(m) { - cnt[m]++ - return - } - // Split up input. - colon := strings.Index(m, ":") - if colon >= 0 && isVariable(m[:colon]) { - cnt[m[:colon]]++ - m = m[colon+1:] - } - if m[0] != '(' || m[len(m)-1] != ')' { - panic("non-compound expr in commute1: " + m) - } - s := split(m[1 : len(m)-1]) - for _, arg := range s[1:] { - varCount1(arg, cnt) - } -} - -// crossProduct returns all possible values -// x[0][i] + " " + x[1][j] + " " + ... + " " + x[len(x)-1][k] -// for all valid values of i, j, ..., k. 
-func crossProduct(x [][]string) []string { - if len(x) == 1 { - return x[0] - } - var r []string - for _, tail := range crossProduct(x[1:]) { - for _, first := range x[0] { - r = append(r, first+" "+tail) - } - } - return r -} - -// normalizeWhitespace replaces 2+ whitespace sequences with a single space. -func normalizeWhitespace(x string) string { - x = strings.Join(strings.Fields(x), " ") - x = strings.Replace(x, "( ", "(", -1) - x = strings.Replace(x, " )", ")", -1) - return x -} diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 81091ee2fa..ce6988e014 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2475,7 +2475,6 @@ var opcodeTable = [...]opInfo{ { name: "HMULL", argLen: 2, - commutative: true, clobberFlags: true, asm: x86.AIMULL, reg: regInfo{ @@ -2492,7 +2491,6 @@ var opcodeTable = [...]opInfo{ { name: "HMULLU", argLen: 2, - commutative: true, clobberFlags: true, asm: x86.AMULL, reg: regInfo{ @@ -2509,7 +2507,6 @@ var opcodeTable = [...]opInfo{ { name: "MULLQU", argLen: 2, - commutative: true, clobberFlags: true, asm: x86.AMULL, reg: regInfo{ @@ -2857,10 +2854,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "TESTL", - argLen: 2, - commutative: true, - asm: x86.ATESTL, + name: "TESTL", + argLen: 2, + asm: x86.ATESTL, reg: regInfo{ inputs: []inputInfo{ {0, 255}, // AX CX DX BX SP BP SI DI @@ -2869,10 +2865,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "TESTW", - argLen: 2, - commutative: true, - asm: x86.ATESTW, + name: "TESTW", + argLen: 2, + asm: x86.ATESTW, reg: regInfo{ inputs: []inputInfo{ {0, 255}, // AX CX DX BX SP BP SI DI @@ -2881,10 +2876,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "TESTB", - argLen: 2, - commutative: true, - asm: x86.ATESTB, + name: "TESTB", + argLen: 2, + asm: x86.ATESTB, reg: regInfo{ inputs: []inputInfo{ {0, 255}, // AX CX DX BX SP BP SI DI @@ -3664,11 +3658,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LEAL1", - auxType: auxSymOff, - argLen: 2, - commutative: true, - symEffect: SymAddr, + name: "LEAL1", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, reg: regInfo{ inputs: []inputInfo{ {1, 255}, // AX CX DX BX SP BP SI DI @@ -3847,12 +3840,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBloadidx1", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: x86.AMOVBLZX, + name: "MOVBloadidx1", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVBLZX, reg: regInfo{ inputs: []inputInfo{ {1, 255}, // AX CX DX BX SP BP SI DI @@ -3864,12 +3856,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWloadidx1", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: x86.AMOVWLZX, + name: "MOVWloadidx1", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVWLZX, reg: regInfo{ inputs: []inputInfo{ {1, 255}, // AX CX DX BX SP BP SI DI @@ -3897,12 +3888,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVLloadidx1", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: x86.AMOVL, + name: "MOVLloadidx1", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ {1, 255}, // AX CX DX BX SP BP SI DI @@ -3930,12 +3920,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBstoreidx1", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: x86.AMOVB, + name: "MOVBstoreidx1", + auxType: auxSymOff, + argLen: 4, + 
symEffect: SymWrite, + asm: x86.AMOVB, reg: regInfo{ inputs: []inputInfo{ {1, 255}, // AX CX DX BX SP BP SI DI @@ -3945,12 +3934,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWstoreidx1", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: x86.AMOVW, + name: "MOVWstoreidx1", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVW, reg: regInfo{ inputs: []inputInfo{ {1, 255}, // AX CX DX BX SP BP SI DI @@ -3974,12 +3962,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVLstoreidx1", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: x86.AMOVL, + name: "MOVLstoreidx1", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ {1, 255}, // AX CX DX BX SP BP SI DI @@ -4962,7 +4949,6 @@ var opcodeTable = [...]opInfo{ { name: "HMULQ", argLen: 2, - commutative: true, clobberFlags: true, asm: x86.AIMULQ, reg: regInfo{ @@ -4979,7 +4965,6 @@ var opcodeTable = [...]opInfo{ { name: "HMULL", argLen: 2, - commutative: true, clobberFlags: true, asm: x86.AIMULL, reg: regInfo{ @@ -4996,7 +4981,6 @@ var opcodeTable = [...]opInfo{ { name: "HMULQU", argLen: 2, - commutative: true, clobberFlags: true, asm: x86.AMULQ, reg: regInfo{ @@ -5013,7 +4997,6 @@ var opcodeTable = [...]opInfo{ { name: "HMULLU", argLen: 2, - commutative: true, clobberFlags: true, asm: x86.AMULL, reg: regInfo{ @@ -5142,7 +5125,6 @@ var opcodeTable = [...]opInfo{ { name: "MULQU2", argLen: 2, - commutative: true, clobberFlags: true, asm: x86.AMULQ, reg: regInfo{ @@ -5526,10 +5508,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "TESTQ", - argLen: 2, - commutative: true, - asm: x86.ATESTQ, + name: "TESTQ", + argLen: 2, + asm: x86.ATESTQ, reg: regInfo{ inputs: []inputInfo{ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -5538,10 +5519,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "TESTL", - argLen: 2, - commutative: true, - asm: x86.ATESTL, + name: "TESTL", + argLen: 2, + asm: x86.ATESTL, reg: regInfo{ inputs: []inputInfo{ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -5550,10 +5530,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "TESTW", - argLen: 2, - commutative: true, - asm: x86.ATESTW, + name: "TESTW", + argLen: 2, + asm: x86.ATESTW, reg: regInfo{ inputs: []inputInfo{ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -5562,10 +5541,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "TESTB", - argLen: 2, - commutative: true, - asm: x86.ATESTB, + name: "TESTB", + argLen: 2, + asm: x86.ATESTB, reg: regInfo{ inputs: []inputInfo{ {0, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -5651,7 +5629,7 @@ var opcodeTable = [...]opInfo{ }, { name: "SHLQconst", - auxType: auxInt8, + auxType: auxInt64, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -5667,7 +5645,7 @@ var opcodeTable = [...]opInfo{ }, { name: "SHLLconst", - auxType: auxInt8, + auxType: auxInt32, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -5747,7 +5725,7 @@ var opcodeTable = [...]opInfo{ }, { name: "SHRQconst", - auxType: auxInt8, + auxType: auxInt64, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -5763,7 +5741,7 @@ var opcodeTable = [...]opInfo{ }, { name: "SHRLconst", - auxType: auxInt8, + auxType: auxInt32, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -5779,7 +5757,7 @@ var opcodeTable = [...]opInfo{ }, { name: "SHRWconst", - auxType: auxInt8, + auxType: auxInt16, argLen: 1, resultInArg0: 
true, clobberFlags: true, @@ -5875,7 +5853,7 @@ var opcodeTable = [...]opInfo{ }, { name: "SARQconst", - auxType: auxInt8, + auxType: auxInt64, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -5891,7 +5869,7 @@ var opcodeTable = [...]opInfo{ }, { name: "SARLconst", - auxType: auxInt8, + auxType: auxInt32, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -5907,7 +5885,7 @@ var opcodeTable = [...]opInfo{ }, { name: "SARWconst", - auxType: auxInt8, + auxType: auxInt16, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -5939,7 +5917,7 @@ var opcodeTable = [...]opInfo{ }, { name: "ROLQconst", - auxType: auxInt8, + auxType: auxInt64, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -5955,7 +5933,7 @@ var opcodeTable = [...]opInfo{ }, { name: "ROLLconst", - auxType: auxInt8, + auxType: auxInt32, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -5971,7 +5949,7 @@ var opcodeTable = [...]opInfo{ }, { name: "ROLWconst", - auxType: auxInt8, + auxType: auxInt16, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -6829,11 +6807,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "LEAQ1", - auxType: auxSymOff, - argLen: 2, - commutative: true, - symEffect: SymAddr, + name: "LEAQ1", + auxType: auxSymOff, + argLen: 2, + symEffect: SymAddr, reg: regInfo{ inputs: []inputInfo{ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -7104,12 +7081,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBloadidx1", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: x86.AMOVBLZX, + name: "MOVBloadidx1", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVBLZX, reg: regInfo{ inputs: []inputInfo{ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -7121,12 +7097,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWloadidx1", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: x86.AMOVWLZX, + name: "MOVWloadidx1", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVWLZX, reg: regInfo{ inputs: []inputInfo{ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -7154,12 +7129,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVLloadidx1", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: x86.AMOVL, + name: "MOVLloadidx1", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -7187,12 +7161,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVQloadidx1", - auxType: auxSymOff, - argLen: 3, - commutative: true, - symEffect: SymRead, - asm: x86.AMOVQ, + name: "MOVQloadidx1", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -7220,12 +7193,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVBstoreidx1", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: x86.AMOVB, + name: "MOVBstoreidx1", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVB, reg: regInfo{ inputs: []inputInfo{ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -7235,12 +7207,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVWstoreidx1", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: x86.AMOVW, + name: "MOVWstoreidx1", + auxType: auxSymOff, 
+ argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVW, reg: regInfo{ inputs: []inputInfo{ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -7264,12 +7235,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVLstoreidx1", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: x86.AMOVL, + name: "MOVLstoreidx1", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -7293,12 +7263,11 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVQstoreidx1", - auxType: auxSymOff, - argLen: 4, - commutative: true, - symEffect: SymWrite, - asm: x86.AMOVQ, + name: "MOVQstoreidx1", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -16565,10 +16534,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "NOR", - argLen: 2, - commutative: true, - asm: ppc64.ANOR, + name: "NOR", + argLen: 2, + asm: ppc64.ANOR, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -18314,7 +18282,6 @@ var opcodeTable = [...]opInfo{ { name: "MULHD", argLen: 2, - commutative: true, resultInArg0: true, clobberFlags: true, asm: s390x.AMULHD, @@ -18331,7 +18298,6 @@ var opcodeTable = [...]opInfo{ { name: "MULHDU", argLen: 2, - commutative: true, resultInArg0: true, clobberFlags: true, asm: s390x.AMULHDU, @@ -18919,7 +18885,7 @@ var opcodeTable = [...]opInfo{ }, { name: "SLDconst", - auxType: auxInt8, + auxType: auxInt64, argLen: 1, asm: s390x.ASLD, reg: regInfo{ @@ -18933,7 +18899,7 @@ var opcodeTable = [...]opInfo{ }, { name: "SLWconst", - auxType: auxInt8, + auxType: auxInt32, argLen: 1, asm: s390x.ASLW, reg: regInfo{ @@ -18975,7 +18941,7 @@ var opcodeTable = [...]opInfo{ }, { name: "SRDconst", - auxType: auxInt8, + auxType: auxInt64, argLen: 1, asm: s390x.ASRD, reg: regInfo{ @@ -18989,7 +18955,7 @@ var opcodeTable = [...]opInfo{ }, { name: "SRWconst", - auxType: auxInt8, + auxType: auxInt32, argLen: 1, asm: s390x.ASRW, reg: regInfo{ @@ -19033,7 +18999,7 @@ var opcodeTable = [...]opInfo{ }, { name: "SRADconst", - auxType: auxInt8, + auxType: auxInt64, argLen: 1, clobberFlags: true, asm: s390x.ASRAD, @@ -19048,7 +19014,7 @@ var opcodeTable = [...]opInfo{ }, { name: "SRAWconst", - auxType: auxInt8, + auxType: auxInt32, argLen: 1, clobberFlags: true, asm: s390x.ASRAW, @@ -19063,7 +19029,7 @@ var opcodeTable = [...]opInfo{ }, { name: "RLLGconst", - auxType: auxInt8, + auxType: auxInt64, argLen: 1, asm: s390x.ARLLG, reg: regInfo{ @@ -19077,7 +19043,7 @@ var opcodeTable = [...]opInfo{ }, { name: "RLLconst", - auxType: auxInt8, + auxType: auxInt32, argLen: 1, asm: s390x.ARLL, reg: regInfo{ @@ -19897,7 +19863,6 @@ var opcodeTable = [...]opInfo{ name: "MOVBZloadidx", auxType: auxSymOff, argLen: 3, - commutative: true, clobberFlags: true, symEffect: SymRead, asm: s390x.AMOVBZ, @@ -19915,7 +19880,6 @@ var opcodeTable = [...]opInfo{ name: "MOVHZloadidx", auxType: auxSymOff, argLen: 3, - commutative: true, clobberFlags: true, symEffect: SymRead, asm: s390x.AMOVHZ, @@ -19933,7 +19897,6 @@ var opcodeTable = [...]opInfo{ name: "MOVWZloadidx", auxType: auxSymOff, argLen: 3, - commutative: true, clobberFlags: true, symEffect: SymRead, asm: s390x.AMOVWZ, @@ -19951,7 +19914,6 @@ var opcodeTable = [...]opInfo{ name: "MOVDloadidx", 
auxType: auxSymOff, argLen: 3, - commutative: true, clobberFlags: true, symEffect: SymRead, asm: s390x.AMOVD, @@ -19969,7 +19931,6 @@ var opcodeTable = [...]opInfo{ name: "MOVHBRloadidx", auxType: auxSymOff, argLen: 3, - commutative: true, clobberFlags: true, symEffect: SymRead, asm: s390x.AMOVHBR, @@ -19987,7 +19948,6 @@ var opcodeTable = [...]opInfo{ name: "MOVWBRloadidx", auxType: auxSymOff, argLen: 3, - commutative: true, clobberFlags: true, symEffect: SymRead, asm: s390x.AMOVWBR, @@ -20005,7 +19965,6 @@ var opcodeTable = [...]opInfo{ name: "MOVDBRloadidx", auxType: auxSymOff, argLen: 3, - commutative: true, clobberFlags: true, symEffect: SymRead, asm: s390x.AMOVDBR, @@ -20023,7 +19982,6 @@ var opcodeTable = [...]opInfo{ name: "MOVBstoreidx", auxType: auxSymOff, argLen: 4, - commutative: true, clobberFlags: true, symEffect: SymWrite, asm: s390x.AMOVB, @@ -20039,7 +19997,6 @@ var opcodeTable = [...]opInfo{ name: "MOVHstoreidx", auxType: auxSymOff, argLen: 4, - commutative: true, clobberFlags: true, symEffect: SymWrite, asm: s390x.AMOVH, @@ -20055,7 +20012,6 @@ var opcodeTable = [...]opInfo{ name: "MOVWstoreidx", auxType: auxSymOff, argLen: 4, - commutative: true, clobberFlags: true, symEffect: SymWrite, asm: s390x.AMOVW, @@ -20071,7 +20027,6 @@ var opcodeTable = [...]opInfo{ name: "MOVDstoreidx", auxType: auxSymOff, argLen: 4, - commutative: true, clobberFlags: true, symEffect: SymWrite, asm: s390x.AMOVD, @@ -20087,7 +20042,6 @@ var opcodeTable = [...]opInfo{ name: "MOVHBRstoreidx", auxType: auxSymOff, argLen: 4, - commutative: true, clobberFlags: true, symEffect: SymWrite, asm: s390x.AMOVHBR, @@ -20103,7 +20057,6 @@ var opcodeTable = [...]opInfo{ name: "MOVWBRstoreidx", auxType: auxSymOff, argLen: 4, - commutative: true, clobberFlags: true, symEffect: SymWrite, asm: s390x.AMOVWBR, @@ -20119,7 +20072,6 @@ var opcodeTable = [...]opInfo{ name: "MOVDBRstoreidx", auxType: auxSymOff, argLen: 4, - commutative: true, clobberFlags: true, symEffect: SymWrite, asm: s390x.AMOVDBR, @@ -20694,16 +20646,14 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Add32F", - argLen: 2, - commutative: true, - generic: true, + name: "Add32F", + argLen: 2, + generic: true, }, { - name: "Add64F", - argLen: 2, - commutative: true, - generic: true, + name: "Add64F", + argLen: 2, + generic: true, }, { name: "Sub8", @@ -20765,16 +20715,14 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Mul32F", - argLen: 2, - commutative: true, - generic: true, + name: "Mul32F", + argLen: 2, + generic: true, }, { - name: "Mul64F", - argLen: 2, - commutative: true, - generic: true, + name: "Mul64F", + argLen: 2, + generic: true, }, { name: "Div32F", @@ -20787,40 +20735,34 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Hmul32", - argLen: 2, - commutative: true, - generic: true, + name: "Hmul32", + argLen: 2, + generic: true, }, { - name: "Hmul32u", - argLen: 2, - commutative: true, - generic: true, + name: "Hmul32u", + argLen: 2, + generic: true, }, { - name: "Hmul64", - argLen: 2, - commutative: true, - generic: true, + name: "Hmul64", + argLen: 2, + generic: true, }, { - name: "Hmul64u", - argLen: 2, - commutative: true, - generic: true, + name: "Hmul64u", + argLen: 2, + generic: true, }, { - name: "Mul32uhilo", - argLen: 2, - commutative: true, - generic: true, + name: "Mul32uhilo", + argLen: 2, + generic: true, }, { - name: "Mul64uhilo", - argLen: 2, - commutative: true, - generic: true, + name: "Mul64uhilo", + argLen: 2, + generic: true, }, { name: "Avg32u", @@ -21270,16 +21212,14 @@ var 
opcodeTable = [...]opInfo{ generic: true, }, { - name: "Eq32F", - argLen: 2, - commutative: true, - generic: true, + name: "Eq32F", + argLen: 2, + generic: true, }, { - name: "Eq64F", - argLen: 2, - commutative: true, - generic: true, + name: "Eq64F", + argLen: 2, + generic: true, }, { name: "Neq8", @@ -21322,16 +21262,14 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "Neq32F", - argLen: 2, - commutative: true, - generic: true, + name: "Neq32F", + argLen: 2, + generic: true, }, { - name: "Neq64F", - argLen: 2, - commutative: true, - generic: true, + name: "Neq64F", + argLen: 2, + generic: true, }, { name: "Less8", diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go index 7ac78aa8bf..ba5288de2a 100644 --- a/src/cmd/compile/internal/ssa/rewrite386.go +++ b/src/cmd/compile/internal/ssa/rewrite386.go @@ -620,40 +620,6 @@ func rewriteValue386_Op386ADCL(v *Value) bool { v.AddArg(f) return true } - // match: (ADCL (MOVLconst [c]) x f) - // cond: - // result: (ADCLconst [c] x f) - for { - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - f := v.Args[2] - v.reset(Op386ADCLconst) - v.AuxInt = c - v.AddArg(x) - v.AddArg(f) - return true - } - // match: (ADCL x (MOVLconst [c]) f) - // cond: - // result: (ADCLconst [c] x f) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386MOVLconst { - break - } - c := v_1.AuxInt - f := v.Args[2] - v.reset(Op386ADCLconst) - v.AuxInt = c - v.AddArg(x) - v.AddArg(f) - return true - } return false } func rewriteValue386_Op386ADDL(v *Value) bool { @@ -687,9 +653,9 @@ func rewriteValue386_Op386ADDL(v *Value) bool { v.AddArg(x) return true } - // match: (ADDL (SHLLconst [c] x) (SHRLconst [d] x)) - // cond: d == 32-c - // result: (ROLLconst [c] x) + // match: (ADDL (SHLLconst [c] x) (SHRLconst [32-c] x)) + // cond: + // result: (ROLLconst [c ] x) for { v_0 := v.Args[0] if v_0.Op != Op386SHLLconst { @@ -701,11 +667,10 @@ func rewriteValue386_Op386ADDL(v *Value) bool { if v_1.Op != Op386SHRLconst { break } - d := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 32-c { break } - if !(d == 32-c) { + if x != v_1.Args[0] { break } v.reset(Op386ROLLconst) @@ -713,35 +678,34 @@ func rewriteValue386_Op386ADDL(v *Value) bool { v.AddArg(x) return true } - // match: (ADDL (SHRLconst [d] x) (SHLLconst [c] x)) - // cond: d == 32-c - // result: (ROLLconst [c] x) + // match: (ADDL (SHRLconst [c] x) (SHLLconst [32-c] x)) + // cond: + // result: (ROLLconst [32-c] x) for { v_0 := v.Args[0] if v_0.Op != Op386SHRLconst { break } - d := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] v_1 := v.Args[1] if v_1.Op != Op386SHLLconst { break } - c := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 32-c { break } - if !(d == 32-c) { + if x != v_1.Args[0] { break } v.reset(Op386ROLLconst) - v.AuxInt = c + v.AuxInt = 32 - c v.AddArg(x) return true } - // match: (ADDL (SHLLconst x [c]) (SHRWconst x [d])) - // cond: c < 16 && d == 16-c && t.Size() == 2 - // result: (ROLWconst x [c]) + // match: (ADDL (SHLLconst x [c]) (SHRWconst x [16-c])) + // cond: c < 16 && t.Size() == 2 + // result: (ROLWconst x [ c]) for { t := v.Type v_0 := v.Args[0] @@ -754,11 +718,13 @@ func rewriteValue386_Op386ADDL(v *Value) bool { if v_1.Op != Op386SHRWconst { break } - d := v_1.AuxInt + if v_1.AuxInt != 16-c { + break + } if x != v_1.Args[0] { break } - if !(c < 16 && d == 16-c && t.Size() == 2) { + if !(c < 16 && t.Size() == 2) { break } v.reset(Op386ROLWconst) @@ -766,36 +732,38 @@ func 
rewriteValue386_Op386ADDL(v *Value) bool { v.AddArg(x) return true } - // match: (ADDL (SHRWconst x [d]) (SHLLconst x [c])) - // cond: c < 16 && d == 16-c && t.Size() == 2 - // result: (ROLWconst x [c]) + // match: (ADDL (SHRWconst x [c]) (SHLLconst x [16-c])) + // cond: c > 0 && t.Size() == 2 + // result: (ROLWconst x [16-c]) for { t := v.Type v_0 := v.Args[0] if v_0.Op != Op386SHRWconst { break } - d := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] v_1 := v.Args[1] if v_1.Op != Op386SHLLconst { break } - c := v_1.AuxInt + if v_1.AuxInt != 16-c { + break + } if x != v_1.Args[0] { break } - if !(c < 16 && d == 16-c && t.Size() == 2) { + if !(c > 0 && t.Size() == 2) { break } v.reset(Op386ROLWconst) - v.AuxInt = c + v.AuxInt = 16 - c v.AddArg(x) return true } - // match: (ADDL (SHLLconst x [c]) (SHRBconst x [d])) - // cond: c < 8 && d == 8-c && t.Size() == 1 - // result: (ROLBconst x [c]) + // match: (ADDL (SHLLconst x [c]) (SHRBconst x [ 8-c])) + // cond: c < 8 && t.Size() == 1 + // result: (ROLBconst x [ c]) for { t := v.Type v_0 := v.Args[0] @@ -808,11 +776,13 @@ func rewriteValue386_Op386ADDL(v *Value) bool { if v_1.Op != Op386SHRBconst { break } - d := v_1.AuxInt + if v_1.AuxInt != 8-c { + break + } if x != v_1.Args[0] { break } - if !(c < 8 && d == 8-c && t.Size() == 1) { + if !(c < 8 && t.Size() == 1) { break } v.reset(Op386ROLBconst) @@ -820,30 +790,32 @@ func rewriteValue386_Op386ADDL(v *Value) bool { v.AddArg(x) return true } - // match: (ADDL (SHRBconst x [d]) (SHLLconst x [c])) - // cond: c < 8 && d == 8-c && t.Size() == 1 - // result: (ROLBconst x [c]) + // match: (ADDL (SHRBconst x [c]) (SHLLconst x [ 8-c])) + // cond: c > 0 && t.Size() == 1 + // result: (ROLBconst x [ 8-c]) for { t := v.Type v_0 := v.Args[0] if v_0.Op != Op386SHRBconst { break } - d := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] v_1 := v.Args[1] if v_1.Op != Op386SHLLconst { break } - c := v_1.AuxInt + if v_1.AuxInt != 8-c { + break + } if x != v_1.Args[0] { break } - if !(c < 8 && d == 8-c && t.Size() == 1) { + if !(c > 0 && t.Size() == 1) { break } v.reset(Op386ROLBconst) - v.AuxInt = c + v.AuxInt = 8 - c v.AddArg(x) return true } @@ -865,24 +837,6 @@ func rewriteValue386_Op386ADDL(v *Value) bool { v.AddArg(y) return true } - // match: (ADDL (SHLLconst [3] y) x) - // cond: - // result: (LEAL8 x y) - for { - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break - } - if v_0.AuxInt != 3 { - break - } - y := v_0.Args[0] - x := v.Args[1] - v.reset(Op386LEAL8) - v.AddArg(x) - v.AddArg(y) - return true - } // match: (ADDL x (SHLLconst [2] y)) // cond: // result: (LEAL4 x y) @@ -901,24 +855,6 @@ func rewriteValue386_Op386ADDL(v *Value) bool { v.AddArg(y) return true } - // match: (ADDL (SHLLconst [2] y) x) - // cond: - // result: (LEAL4 x y) - for { - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break - } - if v_0.AuxInt != 2 { - break - } - y := v_0.Args[0] - x := v.Args[1] - v.reset(Op386LEAL4) - v.AddArg(x) - v.AddArg(y) - return true - } // match: (ADDL x (SHLLconst [1] y)) // cond: // result: (LEAL2 x y) @@ -937,24 +873,6 @@ func rewriteValue386_Op386ADDL(v *Value) bool { v.AddArg(y) return true } - // match: (ADDL (SHLLconst [1] y) x) - // cond: - // result: (LEAL2 x y) - for { - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break - } - if v_0.AuxInt != 1 { - break - } - y := v_0.Args[0] - x := v.Args[1] - v.reset(Op386LEAL2) - v.AddArg(x) - v.AddArg(y) - return true - } // match: (ADDL x (ADDL y y)) // cond: // result: (LEAL2 x y) @@ -973,24 +891,6 @@ func rewriteValue386_Op386ADDL(v *Value) 
bool { v.AddArg(y) return true } - // match: (ADDL (ADDL y y) x) - // cond: - // result: (LEAL2 x y) - for { - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - y := v_0.Args[0] - if y != v_0.Args[1] { - break - } - x := v.Args[1] - v.reset(Op386LEAL2) - v.AddArg(x) - v.AddArg(y) - return true - } // match: (ADDL x (ADDL x y)) // cond: // result: (LEAL2 y x) @@ -1027,42 +927,6 @@ func rewriteValue386_Op386ADDL(v *Value) bool { v.AddArg(x) return true } - // match: (ADDL (ADDL x y) x) - // cond: - // result: (LEAL2 y x) - for { - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - x := v_0.Args[0] - y := v_0.Args[1] - if x != v.Args[1] { - break - } - v.reset(Op386LEAL2) - v.AddArg(y) - v.AddArg(x) - return true - } - // match: (ADDL (ADDL y x) x) - // cond: - // result: (LEAL2 y x) - for { - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - y := v_0.Args[0] - x := v_0.Args[1] - if x != v.Args[1] { - break - } - v.reset(Op386LEAL2) - v.AddArg(y) - v.AddArg(x) - return true - } // match: (ADDL (ADDLconst [c] x) y) // cond: // result: (LEAL1 [c] x y) @@ -1080,17 +944,17 @@ func rewriteValue386_Op386ADDL(v *Value) bool { v.AddArg(y) return true } - // match: (ADDL y (ADDLconst [c] x)) + // match: (ADDL x (ADDLconst [c] y)) // cond: // result: (LEAL1 [c] x y) for { - y := v.Args[0] + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != Op386ADDLconst { break } c := v_1.AuxInt - x := v_1.Args[0] + y := v_1.Args[0] v.reset(Op386LEAL1) v.AuxInt = c v.AddArg(x) @@ -1119,7 +983,7 @@ func rewriteValue386_Op386ADDL(v *Value) bool { v.AddArg(y) return true } - // match: (ADDL (LEAL [c] {s} y) x) + // match: (ADDL (LEAL [c] {s} x) y) // cond: x.Op != OpSB && y.Op != OpSB // result: (LEAL1 [c] {s} x y) for { @@ -1129,8 +993,8 @@ func rewriteValue386_Op386ADDL(v *Value) bool { } c := v_0.AuxInt s := v_0.Aux - y := v_0.Args[0] - x := v.Args[1] + x := v_0.Args[0] + y := v.Args[1] if !(x.Op != OpSB && y.Op != OpSB) { break } @@ -1156,21 +1020,6 @@ func rewriteValue386_Op386ADDL(v *Value) bool { v.AddArg(y) return true } - // match: (ADDL (NEGL y) x) - // cond: - // result: (SUBL x y) - for { - v_0 := v.Args[0] - if v_0.Op != Op386NEGL { - break - } - y := v_0.Args[0] - x := v.Args[1] - v.reset(Op386SUBL) - v.AddArg(x) - v.AddArg(y) - return true - } return false } func rewriteValue386_Op386ADDLcarry(v *Value) bool { @@ -2247,20 +2096,20 @@ func rewriteValue386_Op386LEAL1(v *Value) bool { v.AddArg(y) return true } - // match: (LEAL1 [c] {s} y (ADDLconst [d] x)) - // cond: is32Bit(c+d) && x.Op != OpSB + // match: (LEAL1 [c] {s} x (ADDLconst [d] y)) + // cond: is32Bit(c+d) && y.Op != OpSB // result: (LEAL1 [c+d] {s} x y) for { c := v.AuxInt s := v.Aux - y := v.Args[0] + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != Op386ADDLconst { break } d := v_1.AuxInt - x := v_1.Args[0] - if !(is32Bit(c+d) && x.Op != OpSB) { + y := v_1.Args[0] + if !(is32Bit(c+d) && y.Op != OpSB) { break } v.reset(Op386LEAL1) @@ -2292,9 +2141,9 @@ func rewriteValue386_Op386LEAL1(v *Value) bool { v.AddArg(y) return true } - // match: (LEAL1 [c] {s} (SHLLconst [1] y) x) + // match: (LEAL1 [c] {s} (SHLLconst [1] x) y) // cond: - // result: (LEAL2 [c] {s} x y) + // result: (LEAL2 [c] {s} y x) for { c := v.AuxInt s := v.Aux @@ -2305,13 +2154,13 @@ func rewriteValue386_Op386LEAL1(v *Value) bool { if v_0.AuxInt != 1 { break } - y := v_0.Args[0] - x := v.Args[1] + x := v_0.Args[0] + y := v.Args[1] v.reset(Op386LEAL2) v.AuxInt = c v.Aux = s - v.AddArg(x) v.AddArg(y) + v.AddArg(x) return true } // match: (LEAL1 [c] {s} x (SHLLconst [2] 
y)) @@ -2336,9 +2185,9 @@ func rewriteValue386_Op386LEAL1(v *Value) bool { v.AddArg(y) return true } - // match: (LEAL1 [c] {s} (SHLLconst [2] y) x) + // match: (LEAL1 [c] {s} (SHLLconst [2] x) y) // cond: - // result: (LEAL4 [c] {s} x y) + // result: (LEAL4 [c] {s} y x) for { c := v.AuxInt s := v.Aux @@ -2349,13 +2198,13 @@ func rewriteValue386_Op386LEAL1(v *Value) bool { if v_0.AuxInt != 2 { break } - y := v_0.Args[0] - x := v.Args[1] + x := v_0.Args[0] + y := v.Args[1] v.reset(Op386LEAL4) v.AuxInt = c v.Aux = s - v.AddArg(x) v.AddArg(y) + v.AddArg(x) return true } // match: (LEAL1 [c] {s} x (SHLLconst [3] y)) @@ -2380,9 +2229,9 @@ func rewriteValue386_Op386LEAL1(v *Value) bool { v.AddArg(y) return true } - // match: (LEAL1 [c] {s} (SHLLconst [3] y) x) + // match: (LEAL1 [c] {s} (SHLLconst [3] x) y) // cond: - // result: (LEAL8 [c] {s} x y) + // result: (LEAL8 [c] {s} y x) for { c := v.AuxInt s := v.Aux @@ -2393,13 +2242,13 @@ func rewriteValue386_Op386LEAL1(v *Value) bool { if v_0.AuxInt != 3 { break } - y := v_0.Args[0] - x := v.Args[1] + x := v_0.Args[0] + y := v.Args[1] v.reset(Op386LEAL8) v.AuxInt = c v.Aux = s - v.AddArg(x) v.AddArg(y) + v.AddArg(x) return true } // match: (LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y) @@ -2426,21 +2275,21 @@ func rewriteValue386_Op386LEAL1(v *Value) bool { v.AddArg(y) return true } - // match: (LEAL1 [off1] {sym1} y (LEAL [off2] {sym2} x)) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB + // match: (LEAL1 [off1] {sym1} x (LEAL [off2] {sym2} y)) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y) for { off1 := v.AuxInt sym1 := v.Aux - y := v.Args[0] + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != Op386LEAL { break } off2 := v_1.AuxInt sym2 := v_1.Aux - x := v_1.Args[0] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + y := v_1.Args[0] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) { break } v.reset(Op386LEAL1) @@ -2916,7 +2765,7 @@ func rewriteValue386_Op386MOVBload(v *Value) bool { v.AddArg(x) return true } - // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) + // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVBload [off1+off2] {sym} ptr mem) for { @@ -2939,7 +2788,7 @@ func rewriteValue386_Op386MOVBload(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { @@ -3038,28 +2887,6 @@ func rewriteValue386_Op386MOVBloadidx1(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBloadidx1 [c] {sym} idx (ADDLconst [d] ptr) mem) - // cond: - // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVBloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } // match: (MOVBloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) // cond: // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) @@ -3082,28 +2909,6 @@ func rewriteValue386_Op386MOVBloadidx1(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBloadidx1 [c] 
{sym} (ADDLconst [d] idx) ptr mem) - // cond: - // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVBloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } return false } func rewriteValue386_Op386MOVBstore(v *Value) bool { @@ -3153,7 +2958,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) + // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) // cond: is32Bit(off1+off2) // result: (MOVBstore [off1+off2] {sym} ptr val mem) for { @@ -3201,7 +3006,7 @@ func rewriteValue386_Op386MOVBstore(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { @@ -3610,19 +3415,19 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [c] {sym} idx (ADDLconst [d] ptr) val mem) + // match: (MOVBstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) // cond: // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux - idx := v.Args[0] + ptr := v.Args[0] v_1 := v.Args[1] if v_1.Op != Op386ADDLconst { break } d := v_1.AuxInt - ptr := v_1.Args[0] + idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] v.reset(Op386MOVBstoreidx1) @@ -3634,57 +3439,9 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) - // cond: - // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVBstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [c] {sym} (ADDLconst [d] idx) ptr val mem) - // cond: - // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVBstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) + // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) for { i := v.AuxInt s := v.Aux @@ -3730,9 +3487,9 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} idx p w mem)) + // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} 
p idx w0:(SHRLconst [j-8] w) mem)) // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) + // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) for { i := v.AuxInt s := v.Aux @@ -3742,9 +3499,7 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { if v_2.Op != Op386SHRLconst { break } - if v_2.AuxInt != 8 { - break - } + j := v_2.AuxInt w := v_2.Args[0] x := v.Args[3] if x.Op != Op386MOVBstoreidx1 { @@ -3756,13 +3511,20 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { if x.Aux != s { break } - if idx != x.Args[0] { + if p != x.Args[0] { break } - if p != x.Args[1] { + if idx != x.Args[1] { break } - if w != x.Args[2] { + w0 := x.Args[2] + if w0.Op != Op386SHRLconst { + break + } + if w0.AuxInt != j-8 { + break + } + if w != w0.Args[0] { break } mem := x.Args[3] @@ -3774,452 +3536,555 @@ func rewriteValue386_Op386MOVBstoreidx1(v *Value) bool { v.Aux = s v.AddArg(p) v.AddArg(idx) - v.AddArg(w) + v.AddArg(w0) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} idx p (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) + return false +} +func rewriteValue386_Op386MOVLload(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - if v_2.AuxInt != 8 { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386MOVLstore { break } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 { + off2 := v_1.AuxInt + sym2 := v_1.Aux + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - if x.AuxInt != i-1 { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVLload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { break } - if x.Aux != s { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1 + off2)) { break } - if p != x.Args[0] { + v.reset(Op386MOVLload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { break } - if idx != x.Args[1] { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - if w != x.Args[2] { + v.reset(Op386MOVLload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVLload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386LEAL1 { break } - 
mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) + v.reset(Op386MOVLloadidx1) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) v.AddArg(idx) - v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} idx p (SHRLconst [8] w) x:(MOVBstoreidx1 [i-1] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) + // match: (MOVLload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - if v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 { - break - } - if x.AuxInt != i-1 { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386LEAL4 { break } - if x.Aux != s { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if idx != x.Args[0] { + v.reset(Op386MOVLloadidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLload [off] {sym} (ADDL ptr idx) mem) + // cond: ptr.Op != OpSB + // result: (MOVLloadidx1 [off] {sym} ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386ADDL { break } - if p != x.Args[1] { + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(ptr.Op != OpSB) { break } - if w != x.Args[2] { + v.reset(Op386MOVLloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVLloadidx1(v *Value) bool { + // match: (MOVLloadidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) + // cond: + // result: (MOVLloadidx4 [c] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SHLLconst { break } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { + if v_1.AuxInt != 2 { break } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVLloadidx4) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) v.AddArg(idx) - v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) + // match: (MOVLloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) + // cond: + // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst { - break - } - if w0.AuxInt != j-8 { - break - } - if w != w0.Args[0] { + c := v.AuxInt + sym := 
v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { break } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(Op386MOVLloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) + // cond: + // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { break } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVLloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) v.AddArg(idx) - v.AddArg(w0) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} idx p w0:(SHRLconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) + return false +} +func rewriteValue386_Op386MOVLloadidx4(v *Value) bool { + // match: (MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) + // cond: + // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { break } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(Op386MOVLloadidx4) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) + // cond: + // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { break } - if x.AuxInt != i-1 { + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVLloadidx4) + v.AuxInt = c + 4*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVLstore(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVLstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { break } - if x.Aux != s { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { break } - if idx != x.Args[0] { + v.reset(Op386MOVLstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) + // cond: validOff(off) + // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386MOVLconst { break } - if p != x.Args[1] { + c := v_1.AuxInt + mem := v.Args[2] + if !(validOff(off)) { break } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst { + v.reset(Op386MOVLstoreconst) + v.AuxInt = makeValAndOff(int64(int32(c)), off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: 
is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { break } - if w0.AuxInt != j-8 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - if w != w0.Args[0] { + v.reset(Op386MOVLstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386LEAL1 { break } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) + v.reset(Op386MOVLstoreidx1) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) v.AddArg(idx) - v.AddArg(w0) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} idx p (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRLconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) + // match: (MOVLstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386LEAL4 { break } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst { - break - } - if w0.AuxInt != j-8 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) + v.reset(Op386MOVLstoreidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) v.AddArg(idx) - v.AddArg(w0) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} idx p (SHRLconst [j] w) x:(MOVBstoreidx1 [i-1] {s} idx p w0:(SHRLconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) + // match: (MOVLstore [off] {sym} (ADDL ptr idx) val mem) + // cond: ptr.Op != OpSB + // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVBstoreidx1 { - break - } - if 
x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst { - break - } - if w0.AuxInt != j-8 { - break - } - if w != w0.Args[0] { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386ADDL { break } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(ptr.Op != OpSB) { break } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) + v.reset(Op386MOVLstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) v.AddArg(idx) - v.AddArg(w0) + v.AddArg(val) v.AddArg(mem) return true } return false } -func rewriteValue386_Op386MOVLload(v *Value) bool { +func rewriteValue386_Op386MOVLstoreconst(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: x - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386MOVLstore { - break - } - off2 := v_1.AuxInt - sym2 := v_1.Aux - ptr2 := v_1.Args[0] - x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVLload [off1+off2] {sym} ptr mem) + // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) for { - off1 := v.AuxInt - sym := v.Aux + sc := v.AuxInt + s := v.Aux v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } - off2 := v_0.AuxInt + off := v_0.AuxInt ptr := v_0.Args[0] mem := v.Args[1] - if !(is32Bit(off1 + off2)) { + if !(ValAndOff(sc).canAdd(off)) { break } - v.reset(Op386MOVLload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.reset(Op386MOVLstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) for { - off1 := v.AuxInt + sc := v.AuxInt sym1 := v.Aux v_0 := v.Args[0] if v_0.Op != Op386LEAL { break } - off2 := v_0.AuxInt + off := v_0.AuxInt sym2 := v_0.Aux - base := v_0.Args[0] + ptr := v_0.Args[0] mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386MOVLload) - v.AuxInt = off1 + off2 + v.reset(Op386MOVLstoreconst) + v.AuxInt = ValAndOff(sc).add(off) v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) + v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVLload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + // match: 
(MOVLstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) for { - off1 := v.AuxInt + x := v.AuxInt sym1 := v.Aux v_0 := v.Args[0] if v_0.Op != Op386LEAL1 { break } - off2 := v_0.AuxInt + off := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] idx := v_0.Args[1] mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2)) { break } - v.reset(Op386MOVLloadidx1) - v.AuxInt = off1 + off2 + v.reset(Op386MOVLstoreconstidx1) + v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVLload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + // match: (MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) for { - off1 := v.AuxInt + x := v.AuxInt sym1 := v.Aux v_0 := v.Args[0] if v_0.Op != Op386LEAL4 { break } - off2 := v_0.AuxInt + off := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] idx := v_0.Args[1] mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2)) { break } - v.reset(Op386MOVLloadidx4) - v.AuxInt = off1 + off2 + v.reset(Op386MOVLstoreconstidx4) + v.AuxInt = ValAndOff(x).add(off) v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVLload [off] {sym} (ADDL ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVLloadidx1 [off] {sym} ptr idx mem) + // match: (MOVLstoreconst [x] {sym} (ADDL ptr idx) mem) + // cond: + // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem) for { - off := v.AuxInt + x := v.AuxInt sym := v.Aux v_0 := v.Args[0] if v_0.Op != Op386ADDL { @@ -4228,11 +4093,8 @@ func rewriteValue386_Op386MOVLload(v *Value) bool { ptr := v_0.Args[0] idx := v_0.Args[1] mem := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(Op386MOVLloadidx1) - v.AuxInt = off + v.reset(Op386MOVLstoreconstidx1) + v.AuxInt = x v.Aux = sym v.AddArg(ptr) v.AddArg(idx) @@ -4241,10 +4103,10 @@ func rewriteValue386_Op386MOVLload(v *Value) bool { } return false } -func rewriteValue386_Op386MOVLloadidx1(v *Value) bool { - // match: (MOVLloadidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) +func rewriteValue386_Op386MOVLstoreconstidx1(v *Value) bool { + // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) // cond: - // result: (MOVLloadidx4 [c] {sym} ptr idx mem) + // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux @@ -4258,7 +4120,7 @@ func rewriteValue386_Op386MOVLloadidx1(v *Value) bool { } idx := v_1.Args[0] mem := v.Args[2] - v.reset(Op386MOVLloadidx4) + v.reset(Op386MOVLstoreconstidx4) v.AuxInt = c v.Aux = sym v.AddArg(ptr) @@ -4266,99 +4128,129 @@ func rewriteValue386_Op386MOVLloadidx1(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVLloadidx1 [c] {sym} (SHLLconst [2] idx) ptr mem) + // match: (MOVLstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) // cond: - // result: (MOVLloadidx4 [c] {sym} ptr idx mem) + // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { - c := v.AuxInt + x := v.AuxInt sym := v.Aux v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break - } - if v_0.AuxInt != 2 { + if v_0.Op != Op386ADDLconst 
{ break } - idx := v_0.Args[0] - ptr := v.Args[1] + c := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] mem := v.Args[2] - v.reset(Op386MOVLloadidx4) - v.AuxInt = c + v.reset(Op386MOVLstoreconstidx1) + v.AuxInt = ValAndOff(x).add(c) v.Aux = sym v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVLloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) + // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) // cond: - // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) + // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { - c := v.AuxInt + x := v.AuxInt sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { break } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] + c := v_1.AuxInt + idx := v_1.Args[0] mem := v.Args[2] - v.reset(Op386MOVLloadidx1) - v.AuxInt = c + d + v.reset(Op386MOVLstoreconstidx1) + v.AuxInt = ValAndOff(x).add(c) v.Aux = sym v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVLloadidx1 [c] {sym} idx (ADDLconst [d] ptr) mem) + return false +} +func rewriteValue386_Op386MOVLstoreconstidx4(v *Value) bool { + // match: (MOVLstoreconstidx4 [x] {sym} (ADDLconst [c] ptr) idx mem) // cond: - // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) + // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { - c := v.AuxInt + x := v.AuxInt sym := v.Aux - idx := v.Args[0] + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + c := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(Op386MOVLstoreconstidx4) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem) + // cond: + // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + ptr := v.Args[0] v_1 := v.Args[1] if v_1.Op != Op386ADDLconst { break } - d := v_1.AuxInt - ptr := v_1.Args[0] + c := v_1.AuxInt + idx := v_1.Args[0] mem := v.Args[2] - v.reset(Op386MOVLloadidx1) - v.AuxInt = c + d + v.reset(Op386MOVLstoreconstidx4) + v.AuxInt = ValAndOff(x).add(4 * c) v.Aux = sym v.AddArg(ptr) v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVLloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) + return false +} +func rewriteValue386_Op386MOVLstoreidx1(v *Value) bool { + // match: (MOVLstoreidx1 [c] {sym} ptr (SHLLconst [2] idx) val mem) // cond: - // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) + // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { + if v_1.Op != Op386SHLLconst { + break + } + if v_1.AuxInt != 2 { break } - d := v_1.AuxInt idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVLloadidx1) - v.AuxInt = c + d + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVLstoreidx4) + v.AuxInt = c v.Aux = sym v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVLloadidx1 [c] {sym} (ADDLconst [d] idx) ptr mem) + // match: (MOVLstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) // cond: - // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) + // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -4367,23 +4259,49 @@ func rewriteValue386_Op386MOVLloadidx1(v *Value) bool { break } d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - 
v.reset(Op386MOVLloadidx1) + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVLstoreidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) + // cond: + // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVLstoreidx1) v.AuxInt = c + d v.Aux = sym v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } return false } -func rewriteValue386_Op386MOVLloadidx4(v *Value) bool { - // match: (MOVLloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) +func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { + // match: (MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) // cond: - // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) + // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -4394,18 +4312,20 @@ func rewriteValue386_Op386MOVLloadidx4(v *Value) bool { d := v_0.AuxInt ptr := v_0.Args[0] idx := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVLloadidx4) + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVLstoreidx4) v.AuxInt = c + d v.Aux = sym v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVLloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) + // match: (MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) // cond: - // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) + // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -4416,25 +4336,50 @@ func rewriteValue386_Op386MOVLloadidx4(v *Value) bool { } d := v_1.AuxInt idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVLloadidx4) + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVLstoreidx4) v.AuxInt = c + 4*d v.Aux = sym v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } return false } -func rewriteValue386_Op386MOVLstore(v *Value) bool { +func rewriteValue386_Op386MOVSDconst(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + types := &b.Func.Config.Types + _ = types + // match: (MOVSDconst [c]) + // cond: config.ctxt.Flag_shared + // result: (MOVSDconst2 (MOVSDconst1 [c])) + for { + c := v.AuxInt + if !(config.ctxt.Flag_shared) { + break + } + v.reset(Op386MOVSDconst2) + v0 := b.NewValue0(v.Pos, Op386MOVSDconst1, types.UInt32) + v0.AuxInt = c + v.AddArg(v0) + return true + } + return false +} +func rewriteValue386_Op386MOVSDload(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) + // match: (MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem) // cond: is32Bit(off1+off2) - // result: (MOVLstore [off1+off2] {sym} ptr val mem) + // result: (MOVSDload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux @@ -4444,45 +4389,20 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(is32Bit(off1 + off2)) { break } - v.reset(Op386MOVLstore) + v.reset(Op386MOVSDload) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) - // cond: validOff(off) - // result: 
(MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386MOVLconst { - break - } - c := v_1.AuxInt - mem := v.Args[2] - if !(validOff(off)) { - break - } - v.reset(Op386MOVLstoreconst) - v.AuxInt = makeValAndOff(int64(int32(c)), off) - v.Aux = sym - v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // match: (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt sym1 := v.Aux @@ -4493,22 +4413,20 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386MOVLstore) + v.reset(Op386MOVSDload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(base) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVLstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) + // match: (MOVSDload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) for { off1 := v.AuxInt sym1 := v.Aux @@ -4520,51 +4438,47 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { sym2 := v_0.Aux ptr := v_0.Args[0] idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(Op386MOVLstoreidx1) + v.reset(Op386MOVSDloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVLstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) + // match: (MOVSDload [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) for { off1 := v.AuxInt sym1 := v.Aux v_0 := v.Args[0] - if v_0.Op != Op386LEAL4 { + if v_0.Op != Op386LEAL8 { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(Op386MOVLstoreidx4) + v.reset(Op386MOVSDloadidx8) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVLstore [off] {sym} (ADDL ptr idx) val mem) + // match: (MOVSDload [off] {sym} (ADDL ptr idx) mem) // cond: ptr.Op != OpSB - // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) + // result: (MOVSDloadidx1 [off] {sym} ptr idx mem) for { off := v.AuxInt sym := v.Aux @@ -4574,460 +4488,347 @@ func rewriteValue386_Op386MOVLstore(v *Value) bool { } ptr := v_0.Args[0] idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(ptr.Op != OpSB) { break } - v.reset(Op386MOVLstoreidx1) + v.reset(Op386MOVSDloadidx1) v.AuxInt = off v.Aux = sym 
v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } return false } -func rewriteValue386_Op386MOVLstoreconst(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) +func rewriteValue386_Op386MOVSDloadidx1(v *Value) bool { + // match: (MOVSDloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) + // cond: + // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) for { - sc := v.AuxInt - s := v.Aux + c := v.AuxInt + sym := v.Aux v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } - off := v_0.AuxInt + d := v_0.AuxInt ptr := v_0.Args[0] - mem := v.Args[1] - if !(ValAndOff(sc).canAdd(off)) { - break - } - v.reset(Op386MOVLstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s + idx := v.Args[1] + mem := v.Args[2] + v.reset(Op386MOVSDloadidx1) + v.AuxInt = c + d + v.Aux = sym v.AddArg(ptr) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + // match: (MOVSDloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) + // cond: + // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) for { - sc := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386LEAL { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { break } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVSDloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVSDloadidx8(v *Value) bool { + // match: (MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem) + // cond: + // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { break } - v.reset(Op386MOVLstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(Op386MOVSDloadidx8) + v.AuxInt = c + d + v.Aux = sym v.AddArg(ptr) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (MOVLstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) + // match: (MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem) + // cond: + // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) for { - x := v.AuxInt + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { + break + } + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVSDloadidx8) + v.AuxInt = c + 8*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVSDstore(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVSDstore [off1+off2] {sym} ptr 
val mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(Op386MOVSDstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt sym1 := v.Aux v_0 := v.Args[0] - if v_0.Op != Op386LEAL1 { + if v_0.Op != Op386LEAL { break } - off := v_0.AuxInt + off2 := v_0.AuxInt sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386MOVLstoreconstidx1) - v.AuxInt = ValAndOff(x).add(off) + v.reset(Op386MOVSDstore) + v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) + v.AddArg(base) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVLstoreconst [x] {sym1} (LEAL4 [off] {sym2} ptr idx) mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) + // match: (MOVSDstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) for { - x := v.AuxInt + off1 := v.AuxInt sym1 := v.Aux v_0 := v.Args[0] - if v_0.Op != Op386LEAL4 { + if v_0.Op != Op386LEAL1 { break } - off := v_0.AuxInt + off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] idx := v_0.Args[1] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(Op386MOVLstoreconstidx4) - v.AuxInt = ValAndOff(x).add(off) + v.reset(Op386MOVSDstoreidx1) + v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVLstoreconst [x] {sym} (ADDL ptr idx) mem) - // cond: - // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem) + // match: (MOVSDstore [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) for { - x := v.AuxInt - sym := v.Aux + off1 := v.AuxInt + sym1 := v.Aux v_0 := v.Args[0] - if v_0.Op != Op386ADDL { + if v_0.Op != Op386LEAL8 { break } + off2 := v_0.AuxInt + sym2 := v_0.Aux ptr := v_0.Args[0] idx := v_0.Args[1] - mem := v.Args[1] - v.reset(Op386MOVLstoreconstidx1) - v.AuxInt = x - v.Aux = sym + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(Op386MOVSDstoreidx8) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386MOVLstoreconstidx1(v *Value) bool { - // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLLconst [2] idx) mem) - // cond: - // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) + // match: (MOVSDstore [off] {sym} (ADDL ptr idx) val mem) + // cond: ptr.Op != 
OpSB + // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem) for { - c := v.AuxInt + off := v.AuxInt sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { + v_0 := v.Args[0] + if v_0.Op != Op386ADDL { break } - if v_1.AuxInt != 2 { + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(ptr.Op != OpSB) { break } - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVLstoreconstidx4) - v.AuxInt = c + v.reset(Op386MOVSDstoreidx1) + v.AuxInt = off v.Aux = sym v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVLstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) + return false +} +func rewriteValue386_Op386MOVSDstoreidx1(v *Value) bool { + // match: (MOVSDstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) // cond: - // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) + // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) for { - x := v.AuxInt + c := v.AuxInt sym := v.Aux v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } - c := v_0.AuxInt + d := v_0.AuxInt ptr := v_0.Args[0] idx := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVLstoreconstidx1) - v.AuxInt = ValAndOff(x).add(c) + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVSDstoreidx1) + v.AuxInt = c + d v.Aux = sym v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) + // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) // cond: - // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) + // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) for { - x := v.AuxInt + c := v.AuxInt sym := v.Aux ptr := v.Args[0] v_1 := v.Args[1] if v_1.Op != Op386ADDLconst { break } - c := v_1.AuxInt + d := v_1.AuxInt idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVLstoreconstidx1) - v.AuxInt = ValAndOff(x).add(c) + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVSDstoreidx1) + v.AuxInt = c + d v.Aux = sym v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } return false } -func rewriteValue386_Op386MOVLstoreconstidx4(v *Value) bool { - // match: (MOVLstoreconstidx4 [x] {sym} (ADDLconst [c] ptr) idx mem) +func rewriteValue386_Op386MOVSDstoreidx8(v *Value) bool { + // match: (MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem) // cond: - // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) + // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) for { - x := v.AuxInt + c := v.AuxInt sym := v.Aux v_0 := v.Args[0] if v_0.Op != Op386ADDLconst { break } - c := v_0.AuxInt + d := v_0.AuxInt ptr := v_0.Args[0] idx := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVLstoreconstidx4) - v.AuxInt = ValAndOff(x).add(c) + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVSDstoreidx8) + v.AuxInt = c + d v.Aux = sym v.AddArg(ptr) v.AddArg(idx) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDLconst [c] idx) mem) + // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem) // cond: - // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) + // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) for { - x := v.AuxInt + c := v.AuxInt sym := v.Aux ptr := v.Args[0] v_1 := v.Args[1] if v_1.Op != Op386ADDLconst { break } - c := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVLstoreconstidx4) - v.AuxInt = ValAndOff(x).add(4 * c) - v.Aux = sym - v.AddArg(ptr) - 
v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MOVLstoreidx1(v *Value) bool { - // match: (MOVLstoreidx1 [c] {sym} ptr (SHLLconst [2] idx) val mem) - // cond: - // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { - break - } - if v_1.AuxInt != 2 { - break - } - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVLstoreidx4) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [c] {sym} (SHLLconst [2] idx) ptr val mem) - // cond: - // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break - } - if v_0.AuxInt != 2 { - break - } - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVLstoreidx4) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) - // cond: - // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVLstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [c] {sym} idx (ADDLconst [d] ptr) val mem) - // cond: - // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVLstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) - // cond: - // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVLstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [c] {sym} (ADDLconst [d] idx) ptr val mem) - // cond: - // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVLstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { - // match: (MOVLstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) - // cond: - // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVLstoreidx4) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) 
- v.AddArg(mem) - return true - } - // match: (MOVLstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) - // cond: - // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - d := v_1.AuxInt + d := v_1.AuxInt idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] - v.reset(Op386MOVLstoreidx4) - v.AuxInt = c + 4*d + v.reset(Op386MOVSDstoreidx8) + v.AuxInt = c + 8*d v.Aux = sym v.AddArg(ptr) v.AddArg(idx) @@ -5037,37 +4838,37 @@ func rewriteValue386_Op386MOVLstoreidx4(v *Value) bool { } return false } -func rewriteValue386_Op386MOVSDconst(v *Value) bool { +func rewriteValue386_Op386MOVSSconst(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config types := &b.Func.Config.Types _ = types - // match: (MOVSDconst [c]) + // match: (MOVSSconst [c]) // cond: config.ctxt.Flag_shared - // result: (MOVSDconst2 (MOVSDconst1 [c])) + // result: (MOVSSconst2 (MOVSSconst1 [c])) for { c := v.AuxInt if !(config.ctxt.Flag_shared) { break } - v.reset(Op386MOVSDconst2) - v0 := b.NewValue0(v.Pos, Op386MOVSDconst1, types.UInt32) + v.reset(Op386MOVSSconst2) + v0 := b.NewValue0(v.Pos, Op386MOVSSconst1, types.UInt32) v0.AuxInt = c v.AddArg(v0) return true } return false } -func rewriteValue386_Op386MOVSDload(v *Value) bool { +func rewriteValue386_Op386MOVSSload(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem) + // match: (MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem) // cond: is32Bit(off1+off2) - // result: (MOVSDload [off1+off2] {sym} ptr mem) + // result: (MOVSSload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux @@ -5081,16 +4882,16 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool { if !(is32Bit(off1 + off2)) { break } - v.reset(Op386MOVSDload) + v.reset(Op386MOVSSload) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + // match: (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt sym1 := v.Aux @@ -5105,16 +4906,16 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool { if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386MOVSDload) + v.reset(Op386MOVSSload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(base) v.AddArg(mem) return true } - // match: (MOVSDload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) + // match: (MOVSSload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) for { off1 := v.AuxInt sym1 := v.Aux @@ -5130,7 +4931,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool { if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(Op386MOVSDloadidx1) + v.reset(Op386MOVSSloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) @@ -5138,14 +4939,14 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVSDload [off1] {sym1} (LEAL8 [off2] 
{sym2} ptr idx) mem) + // match: (MOVSSload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) for { off1 := v.AuxInt sym1 := v.Aux v_0 := v.Args[0] - if v_0.Op != Op386LEAL8 { + if v_0.Op != Op386LEAL4 { break } off2 := v_0.AuxInt @@ -5156,7 +4957,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool { if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(Op386MOVSDloadidx8) + v.reset(Op386MOVSSloadidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) @@ -5164,9 +4965,9 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVSDload [off] {sym} (ADDL ptr idx) mem) + // match: (MOVSSload [off] {sym} (ADDL ptr idx) mem) // cond: ptr.Op != OpSB - // result: (MOVSDloadidx1 [off] {sym} ptr idx mem) + // result: (MOVSSloadidx1 [off] {sym} ptr idx mem) for { off := v.AuxInt sym := v.Aux @@ -5180,7 +4981,7 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool { if !(ptr.Op != OpSB) { break } - v.reset(Op386MOVSDloadidx1) + v.reset(Op386MOVSSloadidx1) v.AuxInt = off v.Aux = sym v.AddArg(ptr) @@ -5190,10 +4991,10 @@ func rewriteValue386_Op386MOVSDload(v *Value) bool { } return false } -func rewriteValue386_Op386MOVSDloadidx1(v *Value) bool { - // match: (MOVSDloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) +func rewriteValue386_Op386MOVSSloadidx1(v *Value) bool { + // match: (MOVSSloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) // cond: - // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) + // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux @@ -5205,7 +5006,7 @@ func rewriteValue386_Op386MOVSDloadidx1(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] - v.reset(Op386MOVSDloadidx1) + v.reset(Op386MOVSSloadidx1) v.AuxInt = c + d v.Aux = sym v.AddArg(ptr) @@ -5213,9 +5014,9 @@ func rewriteValue386_Op386MOVSDloadidx1(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVSDloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) + // match: (MOVSSloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) // cond: - // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) + // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux @@ -5227,7 +5028,7 @@ func rewriteValue386_Op386MOVSDloadidx1(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] - v.reset(Op386MOVSDloadidx1) + v.reset(Op386MOVSSloadidx1) v.AuxInt = c + d v.Aux = sym v.AddArg(ptr) @@ -5237,10 +5038,10 @@ func rewriteValue386_Op386MOVSDloadidx1(v *Value) bool { } return false } -func rewriteValue386_Op386MOVSDloadidx8(v *Value) bool { - // match: (MOVSDloadidx8 [c] {sym} (ADDLconst [d] ptr) idx mem) +func rewriteValue386_Op386MOVSSloadidx4(v *Value) bool { + // match: (MOVSSloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) // cond: - // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) + // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux @@ -5252,7 +5053,7 @@ func rewriteValue386_Op386MOVSDloadidx8(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] - v.reset(Op386MOVSDloadidx8) + v.reset(Op386MOVSSloadidx4) v.AuxInt = c + d v.Aux = sym v.AddArg(ptr) @@ -5260,9 +5061,9 @@ func rewriteValue386_Op386MOVSDloadidx8(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVSDloadidx8 [c] {sym} ptr (ADDLconst [d] idx) mem) + // match: 
(MOVSSloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) // cond: - // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) + // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux @@ -5274,8 +5075,8 @@ func rewriteValue386_Op386MOVSDloadidx8(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] - v.reset(Op386MOVSDloadidx8) - v.AuxInt = c + 8*d + v.reset(Op386MOVSSloadidx4) + v.AuxInt = c + 4*d v.Aux = sym v.AddArg(ptr) v.AddArg(idx) @@ -5284,14 +5085,14 @@ func rewriteValue386_Op386MOVSDloadidx8(v *Value) bool { } return false } -func rewriteValue386_Op386MOVSDstore(v *Value) bool { +func rewriteValue386_Op386MOVSSstore(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem) + // match: (MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem) // cond: is32Bit(off1+off2) - // result: (MOVSDstore [off1+off2] {sym} ptr val mem) + // result: (MOVSSstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux @@ -5306,7 +5107,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool { if !(is32Bit(off1 + off2)) { break } - v.reset(Op386MOVSDstore) + v.reset(Op386MOVSSstore) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(ptr) @@ -5314,9 +5115,9 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // match: (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { off1 := v.AuxInt sym1 := v.Aux @@ -5332,7 +5133,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool { if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386MOVSDstore) + v.reset(Op386MOVSSstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(base) @@ -5340,9 +5141,9 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVSDstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) + // match: (MOVSSstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) for { off1 := v.AuxInt sym1 := v.Aux @@ -5359,7 +5160,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool { if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(Op386MOVSDstoreidx1) + v.reset(Op386MOVSSstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) @@ -5368,14 +5169,14 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVSDstore [off1] {sym1} (LEAL8 [off2] {sym2} ptr idx) val mem) + // match: (MOVSSstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) for { off1 := v.AuxInt sym1 := v.Aux v_0 := v.Args[0] - if v_0.Op != Op386LEAL8 { + if v_0.Op != Op386LEAL4 { break } off2 := v_0.AuxInt @@ -5387,7 +5188,7 @@ func 
rewriteValue386_Op386MOVSDstore(v *Value) bool { if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(Op386MOVSDstoreidx8) + v.reset(Op386MOVSSstoreidx4) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) @@ -5396,9 +5197,9 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVSDstore [off] {sym} (ADDL ptr idx) val mem) + // match: (MOVSSstore [off] {sym} (ADDL ptr idx) val mem) // cond: ptr.Op != OpSB - // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem) + // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem) for { off := v.AuxInt sym := v.Aux @@ -5413,7 +5214,7 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool { if !(ptr.Op != OpSB) { break } - v.reset(Op386MOVSDstoreidx1) + v.reset(Op386MOVSSstoreidx1) v.AuxInt = off v.Aux = sym v.AddArg(ptr) @@ -5424,10 +5225,10 @@ func rewriteValue386_Op386MOVSDstore(v *Value) bool { } return false } -func rewriteValue386_Op386MOVSDstoreidx1(v *Value) bool { - // match: (MOVSDstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) +func rewriteValue386_Op386MOVSSstoreidx1(v *Value) bool { + // match: (MOVSSstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) // cond: - // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) + // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -5440,7 +5241,7 @@ func rewriteValue386_Op386MOVSDstoreidx1(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] - v.reset(Op386MOVSDstoreidx1) + v.reset(Op386MOVSSstoreidx1) v.AuxInt = c + d v.Aux = sym v.AddArg(ptr) @@ -5449,9 +5250,9 @@ func rewriteValue386_Op386MOVSDstoreidx1(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) + // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) // cond: - // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) + // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -5464,7 +5265,7 @@ func rewriteValue386_Op386MOVSDstoreidx1(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] - v.reset(Op386MOVSDstoreidx1) + v.reset(Op386MOVSSstoreidx1) v.AuxInt = c + d v.Aux = sym v.AddArg(ptr) @@ -5475,10 +5276,10 @@ func rewriteValue386_Op386MOVSDstoreidx1(v *Value) bool { } return false } -func rewriteValue386_Op386MOVSDstoreidx8(v *Value) bool { - // match: (MOVSDstoreidx8 [c] {sym} (ADDLconst [d] ptr) idx val mem) +func rewriteValue386_Op386MOVSSstoreidx4(v *Value) bool { + // match: (MOVSSstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) // cond: - // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) + // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -5491,7 +5292,7 @@ func rewriteValue386_Op386MOVSDstoreidx8(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] - v.reset(Op386MOVSDstoreidx8) + v.reset(Op386MOVSSstoreidx4) v.AuxInt = c + d v.Aux = sym v.AddArg(ptr) @@ -5500,9 +5301,9 @@ func rewriteValue386_Op386MOVSDstoreidx8(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDLconst [d] idx) val mem) + // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) // cond: - // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) + // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -5515,8 +5316,8 @@ func rewriteValue386_Op386MOVSDstoreidx8(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] - 
v.reset(Op386MOVSDstoreidx8) - v.AuxInt = c + 8*d + v.reset(Op386MOVSSstoreidx4) + v.AuxInt = c + 4*d v.Aux = sym v.AddArg(ptr) v.AddArg(idx) @@ -5526,60 +5327,62 @@ func rewriteValue386_Op386MOVSDstoreidx8(v *Value) bool { } return false } -func rewriteValue386_Op386MOVSSconst(v *Value) bool { +func rewriteValue386_Op386MOVWLSX(v *Value) bool { b := v.Block _ = b - config := b.Func.Config - _ = config - types := &b.Func.Config.Types - _ = types - // match: (MOVSSconst [c]) - // cond: config.ctxt.Flag_shared - // result: (MOVSSconst2 (MOVSSconst1 [c])) + // match: (MOVWLSX x:(MOVWload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWLSXload [off] {sym} ptr mem) for { - c := v.AuxInt - if !(config.ctxt.Flag_shared) { + x := v.Args[0] + if x.Op != Op386MOVWload { break } - v.reset(Op386MOVSSconst2) - v0 := b.NewValue0(v.Pos, Op386MOVSSconst1, types.UInt32) - v0.AuxInt = c + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, Op386MOVWLSXload, v.Type) + v.reset(OpCopy) v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386MOVSSload(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVSSload [off1+off2] {sym} ptr mem) + // match: (MOVWLSX (ANDLconst [c] x)) + // cond: c & 0x8000 == 0 + // result: (ANDLconst [c & 0x7fff] x) for { - off1 := v.AuxInt - sym := v.Aux v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { + if v_0.Op != Op386ANDLconst { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1 + off2)) { + c := v_0.AuxInt + x := v_0.Args[0] + if !(c&0x8000 == 0) { break } - v.reset(Op386MOVSSload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + v.reset(Op386ANDLconst) + v.AuxInt = c & 0x7fff + v.AddArg(x) return true } - // match: (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + return false +} +func rewriteValue386_Op386MOVWLSXload(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // result: (MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt sym1 := v.Aux @@ -5594,193 +5397,145 @@ func rewriteValue386_Op386MOVSSload(v *Value) bool { if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386MOVSSload) + v.reset(Op386MOVWLSXload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(base) v.AddArg(mem) return true } - // match: (MOVSSload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + return false +} +func rewriteValue386_Op386MOVWLZX(v *Value) bool { + b := v.Block + _ = b + // match: (MOVWLZX x:(MOVWload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWload [off] {sym} ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386LEAL1 { + x := v.Args[0] + if x.Op != Op386MOVWload { break } - off2 := 
v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - v.reset(Op386MOVSSloadidx1) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + b = x.Block + v0 := b.NewValue0(v.Pos, Op386MOVWload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) return true } - // match: (MOVSSload [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + // match: (MOVWLZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWloadidx1 [off] {sym} ptr idx mem) for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386LEAL4 { + x := v.Args[0] + if x.Op != Op386MOVWloadidx1 { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - v.reset(Op386MOVSSloadidx4) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + b = x.Block + v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) return true } - // match: (MOVSSload [off] {sym} (ADDL ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVSSloadidx1 [off] {sym} ptr idx mem) + // match: (MOVWLZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWloadidx2 [off] {sym} ptr idx mem) for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(ptr.Op != OpSB) { + x := v.Args[0] + if x.Op != Op386MOVWloadidx2 { break } - v.reset(Op386MOVSSloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MOVSSloadidx1(v *Value) bool { - // match: (MOVSSloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) - // cond: - // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVSSloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + b = x.Block + v0 := b.NewValue0(v.Pos, Op386MOVWloadidx2, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) return true } - // match: (MOVSSloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) + // match: (MOVWLZX (ANDLconst [c] x)) // cond: - // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) + // result: (ANDLconst [c & 0xffff] x) for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { + v_0 := v.Args[0] + if v_0.Op != Op386ANDLconst { 
break } - d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVSSloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + c := v_0.AuxInt + x := v_0.Args[0] + v.reset(Op386ANDLconst) + v.AuxInt = c & 0xffff + v.AddArg(x) return true } return false } -func rewriteValue386_Op386MOVSSloadidx4(v *Value) bool { - // match: (MOVSSloadidx4 [c] {sym} (ADDLconst [d] ptr) idx mem) - // cond: - // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVSSloadidx4) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVSSloadidx4 [c] {sym} ptr (ADDLconst [d] idx) mem) - // cond: - // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) +func rewriteValue386_Op386MOVWload(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x for { - c := v.AuxInt + off := v.AuxInt sym := v.Aux ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { + if v_1.Op != Op386MOVWstore { break } - d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVSSloadidx4) - v.AuxInt = c + 4*d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) + off2 := v_1.AuxInt + sym2 := v_1.Aux + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } - return false -} -func rewriteValue386_Op386MOVSSstore(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem) + // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) // cond: is32Bit(off1+off2) - // result: (MOVSSstore [off1+off2] {sym} ptr val mem) + // result: (MOVWload [off1+off2] {sym} ptr mem) for { off1 := v.AuxInt sym := v.Aux @@ -5790,22 +5545,20 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool { } off2 := v_0.AuxInt ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(is32Bit(off1 + off2)) { break } - v.reset(Op386MOVSSstore) + v.reset(Op386MOVWload) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt sym1 := v.Aux @@ -5816,22 +5569,20 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386MOVSSstore) + v.reset(Op386MOVWload) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(base) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVSSstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) + // match: (MOVWload [off1] {sym1} (LEAL1 [off2] 
{sym2} ptr idx) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) for { off1 := v.AuxInt sym1 := v.Aux @@ -5843,51 +5594,47 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool { sym2 := v_0.Aux ptr := v_0.Args[0] idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(Op386MOVSSstoreidx1) + v.reset(Op386MOVWloadidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVSSstore [off1] {sym1} (LEAL4 [off2] {sym2} ptr idx) val mem) + // match: (MOVWload [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) for { off1 := v.AuxInt sym1 := v.Aux v_0 := v.Args[0] - if v_0.Op != Op386LEAL4 { + if v_0.Op != Op386LEAL2 { break } off2 := v_0.AuxInt sym2 := v_0.Aux ptr := v_0.Args[0] idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(Op386MOVSSstoreidx4) + v.reset(Op386MOVWloadidx2) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVSSstore [off] {sym} (ADDL ptr idx) val mem) + // match: (MOVWload [off] {sym} (ADDL ptr idx) mem) // cond: ptr.Op != OpSB - // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem) + // result: (MOVWloadidx1 [off] {sym} ptr idx mem) for { off := v.AuxInt sym := v.Aux @@ -5897,26 +5644,48 @@ func rewriteValue386_Op386MOVSSstore(v *Value) bool { } ptr := v_0.Args[0] idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] + mem := v.Args[1] if !(ptr.Op != OpSB) { break } - v.reset(Op386MOVSSstoreidx1) + v.reset(Op386MOVWloadidx1) v.AuxInt = off v.Aux = sym v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } return false } -func rewriteValue386_Op386MOVSSstoreidx1(v *Value) bool { - // match: (MOVSSstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) +func rewriteValue386_Op386MOVWloadidx1(v *Value) bool { + // match: (MOVWloadidx1 [c] {sym} ptr (SHLLconst [1] idx) mem) // cond: - // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) + // result: (MOVWloadidx2 [c] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SHLLconst { + break + } + if v_1.AuxInt != 1 { + break + } + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVWloadidx2) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) + // cond: + // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux @@ -5927,20 +5696,18 @@ func rewriteValue386_Op386MOVSSstoreidx1(v *Value) bool { d := v_0.AuxInt ptr := v_0.Args[0] idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVSSstoreidx1) + mem := v.Args[2] + v.reset(Op386MOVWloadidx1) v.AuxInt = c + d v.Aux = sym v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) + // match: (MOVWloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) 
// cond: - // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) + // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux @@ -5951,23 +5718,21 @@ func rewriteValue386_Op386MOVSSstoreidx1(v *Value) bool { } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVSSstoreidx1) + mem := v.Args[2] + v.reset(Op386MOVWloadidx1) v.AuxInt = c + d v.Aux = sym v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } return false } -func rewriteValue386_Op386MOVSSstoreidx4(v *Value) bool { - // match: (MOVSSstoreidx4 [c] {sym} (ADDLconst [d] ptr) idx val mem) +func rewriteValue386_Op386MOVWloadidx2(v *Value) bool { + // match: (MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem) // cond: - // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) + // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux @@ -5978,20 +5743,18 @@ func rewriteValue386_Op386MOVSSstoreidx4(v *Value) bool { d := v_0.AuxInt ptr := v_0.Args[0] idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVSSstoreidx4) + mem := v.Args[2] + v.reset(Op386MOVWloadidx2) v.AuxInt = c + d v.Aux = sym v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDLconst [d] idx) val mem) + // match: (MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem) // cond: - // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) + // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux @@ -6002,228 +5765,67 @@ func rewriteValue386_Op386MOVSSstoreidx4(v *Value) bool { } d := v_1.AuxInt idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVSSstoreidx4) - v.AuxInt = c + 4*d + mem := v.Args[2] + v.reset(Op386MOVWloadidx2) + v.AuxInt = c + 2*d v.Aux = sym v.AddArg(ptr) v.AddArg(idx) - v.AddArg(val) v.AddArg(mem) return true } return false } -func rewriteValue386_Op386MOVWLSX(v *Value) bool { - b := v.Block - _ = b - // match: (MOVWLSX x:(MOVWload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWLSXload [off] {sym} ptr mem) - for { - x := v.Args[0] - if x.Op != Op386MOVWload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, Op386MOVWLSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVWLSX (ANDLconst [c] x)) - // cond: c & 0x8000 == 0 - // result: (ANDLconst [c & 0x7fff] x) - for { - v_0 := v.Args[0] - if v_0.Op != Op386ANDLconst { - break - } - c := v_0.AuxInt - x := v_0.Args[0] - if !(c&0x8000 == 0) { - break - } - v.reset(Op386ANDLconst) - v.AuxInt = c & 0x7fff - v.AddArg(x) - return true - } - return false -} -func rewriteValue386_Op386MOVWLSXload(v *Value) bool { +func rewriteValue386_Op386MOVWstore(v *Value) bool { b := v.Block _ = b config := b.Func.Config _ = config - // match: (MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386LEAL { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || 
!config.ctxt.Flag_shared)) { - break - } - v.reset(Op386MOVWLSXload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MOVWLZX(v *Value) bool { - b := v.Block - _ = b - // match: (MOVWLZX x:(MOVWload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWload [off] {sym} ptr mem) - for { - x := v.Args[0] - if x.Op != Op386MOVWload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, Op386MOVWload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVWLZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWloadidx1 [off] {sym} ptr idx mem) - for { - x := v.Args[0] - if x.Op != Op386MOVWloadidx1 { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - idx := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (MOVWLZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWloadidx2 [off] {sym} ptr idx mem) - for { - x := v.Args[0] - if x.Op != Op386MOVWloadidx2 { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - idx := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, Op386MOVWloadidx2, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (MOVWLZX (ANDLconst [c] x)) + // match: (MOVWstore [off] {sym} ptr (MOVWLSX x) mem) // cond: - // result: (ANDLconst [c & 0xffff] x) + // result: (MOVWstore [off] {sym} ptr x mem) for { - v_0 := v.Args[0] - if v_0.Op != Op386ANDLconst { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386MOVWLSX { break } - c := v_0.AuxInt - x := v_0.Args[0] - v.reset(Op386ANDLconst) - v.AuxInt = c & 0xffff + x := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVWstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) v.AddArg(x) + v.AddArg(mem) return true } - return false -} -func rewriteValue386_Op386MOVWload(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: x + // match: (MOVWstore [off] {sym} ptr (MOVWLZX x) mem) + // cond: + // result: (MOVWstore [off] {sym} ptr x mem) for { off := v.AuxInt sym := v.Aux ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386MOVWstore { - break - } - off2 := v_1.AuxInt - sym2 := v_1.Aux - ptr2 := v_1.Args[0] - x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + if v_1.Op != Op386MOVWLZX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVWstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) v.AddArg(x) + v.AddArg(mem) return true } - // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) + // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) // cond: 
is32Bit(off1+off2) - // result: (MOVWload [off1+off2] {sym} ptr mem) + // result: (MOVWstore [off1+off2] {sym} ptr val mem) for { off1 := v.AuxInt sym := v.Aux @@ -6233,20 +5835,45 @@ func rewriteValue386_Op386MOVWload(v *Value) bool { } off2 := v_0.AuxInt ptr := v_0.Args[0] - mem := v.Args[1] + val := v.Args[1] + mem := v.Args[2] if !(is32Bit(off1 + off2)) { break } - v.reset(Op386MOVWload) + v.reset(Op386MOVWstore) v.AuxInt = off1 + off2 v.Aux = sym v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) + // cond: validOff(off) + // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386MOVLconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + if !(validOff(off)) { + break + } + v.reset(Op386MOVWstoreconst) + v.AuxInt = makeValAndOff(int64(int16(c)), off) + v.Aux = sym + v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) + // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { off1 := v.AuxInt sym1 := v.Aux @@ -6257,20 +5884,22 @@ func rewriteValue386_Op386MOVWload(v *Value) bool { off2 := v_0.AuxInt sym2 := v_0.Aux base := v_0.Args[0] - mem := v.Args[1] + val := v.Args[1] + mem := v.Args[2] if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { break } - v.reset(Op386MOVWload) + v.reset(Op386MOVWstore) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(base) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVWload [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) mem) + // match: (MOVWstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) for { off1 := v.AuxInt sym1 := v.Aux @@ -6282,5374 +5911,1600 @@ func rewriteValue386_Op386MOVWload(v *Value) bool { sym2 := v_0.Aux ptr := v_0.Args[0] idx := v_0.Args[1] - mem := v.Args[1] + val := v.Args[1] + mem := v.Args[2] if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(Op386MOVWloadidx1) + v.reset(Op386MOVWstoreidx1) v.AuxInt = off1 + off2 v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWload [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386LEAL2 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(Op386MOVWloadidx2) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWload [off] {sym} (ADDL ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVWloadidx1 [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != 
Op386ADDL { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(Op386MOVWloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MOVWloadidx1(v *Value) bool { - // match: (MOVWloadidx1 [c] {sym} ptr (SHLLconst [1] idx) mem) - // cond: - // result: (MOVWloadidx2 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { - break - } - if v_1.AuxInt != 1 { - break - } - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVWloadidx2) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx1 [c] {sym} (SHLLconst [1] idx) ptr mem) - // cond: - // result: (MOVWloadidx2 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break - } - if v_0.AuxInt != 1 { - break - } - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVWloadidx2) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx1 [c] {sym} (ADDLconst [d] ptr) idx mem) - // cond: - // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVWloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx1 [c] {sym} idx (ADDLconst [d] ptr) mem) - // cond: - // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVWloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx1 [c] {sym} ptr (ADDLconst [d] idx) mem) - // cond: - // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVWloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx1 [c] {sym} (ADDLconst [d] idx) ptr mem) - // cond: - // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVWloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MOVWloadidx2(v *Value) bool { - // match: (MOVWloadidx2 [c] {sym} (ADDLconst [d] ptr) idx mem) - // cond: - // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVWloadidx2) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx2 [c] {sym} ptr (ADDLconst [d] idx) mem) - // cond: - // result: 
(MOVWloadidx2 [c+2*d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVWloadidx2) - v.AuxInt = c + 2*d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MOVWstore(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVWstore [off] {sym} ptr (MOVWLSX x) mem) - // cond: - // result: (MOVWstore [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386MOVWLSX { - break - } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVWstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off] {sym} ptr (MOVWLZX x) mem) - // cond: - // result: (MOVWstore [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386MOVWLZX { - break - } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVWstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVWstore [off1+off2] {sym} ptr val mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(Op386MOVWstore) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) - // cond: validOff(off) - // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386MOVLconst { - break - } - c := v_1.AuxInt - mem := v.Args[2] - if !(validOff(off)) { - break - } - v.reset(Op386MOVWstoreconst) - v.AuxInt = makeValAndOff(int64(int16(c)), off) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386LEAL { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) { - break - } - v.reset(Op386MOVWstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off1] {sym1} (LEAL1 [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386LEAL1 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = off1 
+ off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386LEAL2 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(Op386MOVWstoreidx2) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off] {sym} (ADDL ptr idx) val mem) - // cond: ptr.Op != OpSB - // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(ptr.Op != OpSB) { - break - } - v.reset(Op386MOVWstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstore [i-2] {s} p w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHRLconst { - break - } - if v_1.AuxInt != 16 { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != Op386MOVWstore { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if w != x.Args[1] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstore) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstore [i-2] {s} p w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHRLconst { - break - } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != Op386MOVWstore { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - w0 := x.Args[1] - if w0.Op != Op386SHRLconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstore) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) - for { - sc := v.AuxInt - s := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - off := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(ValAndOff(sc).canAdd(off)) { - break - } - v.reset(Op386MOVWstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) - // cond: 
canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) - // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) - for { - sc := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386LEAL { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { - break - } - v.reset(Op386MOVWstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) - for { - x := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386LEAL1 { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(Op386MOVWstoreconstidx1) - v.AuxInt = ValAndOff(x).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconst [x] {sym1} (LEAL2 [off] {sym2} ptr idx) mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) - for { - x := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386LEAL2 { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(Op386MOVWstoreconstidx2) - v.AuxInt = ValAndOff(x).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconst [x] {sym} (ADDL ptr idx) mem) - // cond: - // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDL { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - v.reset(Op386MOVWstoreconstidx1) - v.AuxInt = x - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) - // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) - // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) - for { - c := v.AuxInt - s := v.Aux - p := v.Args[0] - x := v.Args[1] - if x.Op != Op386MOVWstoreconst { - break - } - a := x.AuxInt - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - mem := x.Args[1] - if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { - break - } - v.reset(Op386MOVLstoreconst) - v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) - v.Aux = s - v.AddArg(p) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MOVWstoreconstidx1(v *Value) bool { - // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLLconst [1] idx) mem) - // cond: - // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { - break - } - if v_1.AuxInt != 1 { - break - } - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVWstoreconstidx2) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - 
v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) - // cond: - // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - c := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVWstoreconstidx1) - v.AuxInt = ValAndOff(x).add(c) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) - // cond: - // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - c := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVWstoreconstidx1) - v.AuxInt = ValAndOff(x).add(c) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) - // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) - // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) - for { - c := v.AuxInt - s := v.Aux - p := v.Args[0] - i := v.Args[1] - x := v.Args[2] - if x.Op != Op386MOVWstoreconstidx1 { - break - } - a := x.AuxInt - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if i != x.Args[1] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { - break - } - v.reset(Op386MOVLstoreconstidx1) - v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) - v.Aux = s - v.AddArg(p) - v.AddArg(i) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MOVWstoreconstidx2(v *Value) bool { - b := v.Block - _ = b - // match: (MOVWstoreconstidx2 [x] {sym} (ADDLconst [c] ptr) idx mem) - // cond: - // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - c := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(Op386MOVWstoreconstidx2) - v.AuxInt = ValAndOff(x).add(c) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDLconst [c] idx) mem) - // cond: - // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - c := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(Op386MOVWstoreconstidx2) - v.AuxInt = ValAndOff(x).add(2 * c) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) - // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) - // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLLconst [1] i) mem) - for { - c := v.AuxInt - s := v.Aux - p := v.Args[0] - i := v.Args[1] - x := v.Args[2] - if x.Op != Op386MOVWstoreconstidx2 { - break - } - a := x.AuxInt - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if i != x.Args[1] { - break - } - mem := 
x.Args[2] - if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { - break - } - v.reset(Op386MOVLstoreconstidx1) - v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) - v.Aux = s - v.AddArg(p) - v0 := b.NewValue0(v.Pos, Op386SHLLconst, i.Type) - v0.AuxInt = 1 - v0.AddArg(i) - v.AddArg(v0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool { - // match: (MOVWstoreidx1 [c] {sym} ptr (SHLLconst [1] idx) val mem) - // cond: - // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { - break - } - if v_1.AuxInt != 1 { - break - } - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVWstoreidx2) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [c] {sym} (SHLLconst [1] idx) ptr val mem) - // cond: - // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break - } - if v_0.AuxInt != 1 { - break - } - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVWstoreidx2) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) - // cond: - // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVWstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [c] {sym} idx (ADDLconst [d] ptr) val mem) - // cond: - // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVWstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) - // cond: - // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVWstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [c] {sym} (ADDLconst [d] idx) ptr val mem) - // cond: - // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVWstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p 
idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - if v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVWstoreidx1 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - if v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVWstoreidx1 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} idx p (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - if v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVWstoreidx1 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} idx p (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - if v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVWstoreidx1 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVWstoreidx1 { - break - } - if x.AuxInt 
!= i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} idx p w0:(SHRLconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVWstoreidx1 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} idx p (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVWstoreidx1 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} idx p (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} idx p w0:(SHRLconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVWstoreidx1 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MOVWstoreidx2(v *Value) bool { - b := v.Block - _ = b - // match: (MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem) - // cond: - // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != Op386ADDLconst { 
- break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVWstoreidx2) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem) - // cond: - // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386ADDLconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(Op386MOVWstoreidx2) - v.AuxInt = c + 2*d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p (SHLLconst [1] idx) w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - if v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVWstoreidx2 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v0 := b.NewValue0(v.Pos, Op386SHLLconst, idx.Type) - v0.AuxInt = 1 - v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p (SHLLconst [1] idx) w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != Op386SHRLconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != Op386MOVWstoreidx2 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != Op386SHRLconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(Op386MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v0 := b.NewValue0(v.Pos, Op386SHLLconst, idx.Type) - v0.AuxInt = 1 - v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValue386_Op386MULL(v *Value) bool { - // match: (MULL x (MOVLconst [c])) - // cond: - // result: (MULLconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386MOVLconst { - break - } - c := v_1.AuxInt - v.reset(Op386MULLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (MULL (MOVLconst [c]) x) - // cond: - // result: (MULLconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(Op386MULLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - return false -} -func rewriteValue386_Op386MULLconst(v *Value) bool { - b := v.Block - _ = b - // match: (MULLconst [c] (MULLconst [d] x)) - // cond: - // result: (MULLconst [int64(int32(c * d))] x) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != Op386MULLconst { - break - } - d 
:= v_0.AuxInt - x := v_0.Args[0] - v.reset(Op386MULLconst) - v.AuxInt = int64(int32(c * d)) - v.AddArg(x) - return true - } - // match: (MULLconst [-1] x) - // cond: - // result: (NEGL x) - for { - if v.AuxInt != -1 { - break - } - x := v.Args[0] - v.reset(Op386NEGL) - v.AddArg(x) - return true - } - // match: (MULLconst [0] _) - // cond: - // result: (MOVLconst [0]) - for { - if v.AuxInt != 0 { - break - } - v.reset(Op386MOVLconst) - v.AuxInt = 0 - return true - } - // match: (MULLconst [1] x) - // cond: - // result: x - for { - if v.AuxInt != 1 { - break - } - x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MULLconst [3] x) - // cond: - // result: (LEAL2 x x) - for { - if v.AuxInt != 3 { - break - } - x := v.Args[0] - v.reset(Op386LEAL2) - v.AddArg(x) - v.AddArg(x) - return true - } - // match: (MULLconst [5] x) - // cond: - // result: (LEAL4 x x) - for { - if v.AuxInt != 5 { - break - } - x := v.Args[0] - v.reset(Op386LEAL4) - v.AddArg(x) - v.AddArg(x) - return true - } - // match: (MULLconst [7] x) - // cond: - // result: (LEAL8 (NEGL x) x) - for { - if v.AuxInt != 7 { - break - } - x := v.Args[0] - v.reset(Op386LEAL8) - v0 := b.NewValue0(v.Pos, Op386NEGL, v.Type) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULLconst [9] x) - // cond: - // result: (LEAL8 x x) - for { - if v.AuxInt != 9 { - break - } - x := v.Args[0] - v.reset(Op386LEAL8) - v.AddArg(x) - v.AddArg(x) - return true - } - // match: (MULLconst [11] x) - // cond: - // result: (LEAL2 x (LEAL4 x x)) - for { - if v.AuxInt != 11 { - break - } - x := v.Args[0] - v.reset(Op386LEAL2) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULLconst [13] x) - // cond: - // result: (LEAL4 x (LEAL2 x x)) - for { - if v.AuxInt != 13 { - break - } - x := v.Args[0] - v.reset(Op386LEAL4) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULLconst [21] x) - // cond: - // result: (LEAL4 x (LEAL4 x x)) - for { - if v.AuxInt != 21 { - break - } - x := v.Args[0] - v.reset(Op386LEAL4) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULLconst [25] x) - // cond: - // result: (LEAL8 x (LEAL2 x x)) - for { - if v.AuxInt != 25 { - break - } - x := v.Args[0] - v.reset(Op386LEAL8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULLconst [37] x) - // cond: - // result: (LEAL4 x (LEAL8 x x)) - for { - if v.AuxInt != 37 { - break - } - x := v.Args[0] - v.reset(Op386LEAL4) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULLconst [41] x) - // cond: - // result: (LEAL8 x (LEAL4 x x)) - for { - if v.AuxInt != 41 { - break - } - x := v.Args[0] - v.reset(Op386LEAL8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULLconst [73] x) - // cond: - // result: (LEAL8 x (LEAL8 x x)) - for { - if v.AuxInt != 73 { - break - } - x := v.Args[0] - v.reset(Op386LEAL8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULLconst [c] x) - // cond: isPowerOfTwo(c) - // result: (SHLLconst [log2(c)] 
x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c)) { - break - } - v.reset(Op386SHLLconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true - } - // match: (MULLconst [c] x) - // cond: isPowerOfTwo(c+1) && c >= 15 - // result: (SUBL (SHLLconst [log2(c+1)] x) x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c+1) && c >= 15) { - break - } - v.reset(Op386SUBL) - v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) - v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULLconst [c] x) - // cond: isPowerOfTwo(c-1) && c >= 17 - // result: (LEAL1 (SHLLconst [log2(c-1)] x) x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c-1) && c >= 17) { - break - } - v.reset(Op386LEAL1) - v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) - v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULLconst [c] x) - // cond: isPowerOfTwo(c-2) && c >= 34 - // result: (LEAL2 (SHLLconst [log2(c-2)] x) x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c-2) && c >= 34) { - break - } - v.reset(Op386LEAL2) - v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) - v0.AuxInt = log2(c - 2) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULLconst [c] x) - // cond: isPowerOfTwo(c-4) && c >= 68 - // result: (LEAL4 (SHLLconst [log2(c-4)] x) x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c-4) && c >= 68) { - break - } - v.reset(Op386LEAL4) - v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) - v0.AuxInt = log2(c - 4) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULLconst [c] x) - // cond: isPowerOfTwo(c-8) && c >= 136 - // result: (LEAL8 (SHLLconst [log2(c-8)] x) x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c-8) && c >= 136) { - break - } - v.reset(Op386LEAL8) - v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) - v0.AuxInt = log2(c - 8) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULLconst [c] x) - // cond: c%3 == 0 && isPowerOfTwo(c/3) - // result: (SHLLconst [log2(c/3)] (LEAL2 x x)) - for { - c := v.AuxInt - x := v.Args[0] - if !(c%3 == 0 && isPowerOfTwo(c/3)) { - break - } - v.reset(Op386SHLLconst) - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULLconst [c] x) - // cond: c%5 == 0 && isPowerOfTwo(c/5) - // result: (SHLLconst [log2(c/5)] (LEAL4 x x)) - for { - c := v.AuxInt - x := v.Args[0] - if !(c%5 == 0 && isPowerOfTwo(c/5)) { - break - } - v.reset(Op386SHLLconst) - v.AuxInt = log2(c / 5) - v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULLconst [c] x) - // cond: c%9 == 0 && isPowerOfTwo(c/9) - // result: (SHLLconst [log2(c/9)] (LEAL8 x x)) - for { - c := v.AuxInt - x := v.Args[0] - if !(c%9 == 0 && isPowerOfTwo(c/9)) { - break - } - v.reset(Op386SHLLconst) - v.AuxInt = log2(c / 9) - v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULLconst [c] (MOVLconst [d])) - // cond: - // result: (MOVLconst [int64(int32(c*d))]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { - break - } - d := v_0.AuxInt - v.reset(Op386MOVLconst) - v.AuxInt = int64(int32(c * d)) - return true - } - return false -} -func rewriteValue386_Op386NEGL(v *Value) bool { - // match: (NEGL (MOVLconst [c])) - // cond: - // result: 
(MOVLconst [int64(int32(-c))]) - for { - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { - break - } - c := v_0.AuxInt - v.reset(Op386MOVLconst) - v.AuxInt = int64(int32(-c)) - return true - } - return false -} -func rewriteValue386_Op386NOTL(v *Value) bool { - // match: (NOTL (MOVLconst [c])) - // cond: - // result: (MOVLconst [^c]) - for { - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { - break - } - c := v_0.AuxInt - v.reset(Op386MOVLconst) - v.AuxInt = ^c - return true - } - return false -} -func rewriteValue386_Op386ORL(v *Value) bool { - b := v.Block - _ = b - types := &b.Func.Config.Types - _ = types - // match: (ORL x (MOVLconst [c])) - // cond: - // result: (ORLconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386MOVLconst { - break - } - c := v_1.AuxInt - v.reset(Op386ORLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (MOVLconst [c]) x) - // cond: - // result: (ORLconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != Op386MOVLconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(Op386ORLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (SHLLconst [c] x) (SHRLconst [d] x)) - // cond: d == 32-c - // result: (ROLLconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break - } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHRLconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(d == 32-c) { - break - } - v.reset(Op386ROLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (SHRLconst [d] x) (SHLLconst [c] x)) - // cond: d == 32-c - // result: (ROLLconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != Op386SHRLconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(d == 32-c) { - break - } - v.reset(Op386ROLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (SHLLconst x [c]) (SHRWconst x [d])) - // cond: c < 16 && d == 16-c && t.Size() == 2 - // result: (ROLWconst x [c]) - for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break - } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHRWconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(c < 16 && d == 16-c && t.Size() == 2) { - break - } - v.reset(Op386ROLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (SHRWconst x [d]) (SHLLconst x [c])) - // cond: c < 16 && d == 16-c && t.Size() == 2 - // result: (ROLWconst x [c]) - for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != Op386SHRWconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(c < 16 && d == 16-c && t.Size() == 2) { - break - } - v.reset(Op386ROLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (SHLLconst x [c]) (SHRBconst x [d])) - // cond: c < 8 && d == 8-c && t.Size() == 1 - // result: (ROLBconst x [c]) - for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != Op386SHLLconst { - break - } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHRBconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(c < 8 && d == 8-c && t.Size() == 1) { - break - } - v.reset(Op386ROLBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (SHRBconst x [d]) (SHLLconst 
x [c])) - // cond: c < 8 && d == 8-c && t.Size() == 1 - // result: (ROLBconst x [c]) - for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != Op386SHRBconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(c < 8 && d == 8-c && t.Size() == 1) { - break - } - v.reset(Op386ROLBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL x x) - // cond: - // result: x - for { - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ORL x0:(MOVBload [i0] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) - for { - x0 := v.Args[0] - if x0.Op != Op386MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - s0 := v.Args[1] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWload, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORL s0:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) - for { - s0 := v.Args[0] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - x0 := v.Args[1] - if x0.Op != Op386MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWload, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL x0:(MOVWload [i0] {s} p mem) s0:(SHLLconst [16] x1:(MOVBload [i2] {s} p mem))) s1:(SHLLconst [24] x2:(MOVBload [i3] {s} p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLload [i0] {s} p mem) - for { - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - x0 := o0.Args[0] - if x0.Op != Op386MOVWload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - 
if x1.Op != Op386MOVBload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBload { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if mem != x2.Args[1] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBload [i2] {s} p mem)) x0:(MOVWload [i0] {s} p mem)) s1:(SHLLconst [24] x2:(MOVBload [i3] {s} p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLload [i0] {s} p mem) - for { - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBload { - break - } - i2 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - x0 := o0.Args[1] - if x0.Op != Op386MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBload { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if mem != x2.Args[1] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBload [i3] {s} p mem)) o0:(ORL x0:(MOVWload [i0] {s} p mem) s0:(SHLLconst [16] x1:(MOVBload [i2] {s} p mem)))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLload [i0] {s} p mem) - for { - s1 := v.Args[0] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBload { - break - } - i3 := x2.AuxInt - s := x2.Aux - p := x2.Args[0] - mem := x2.Args[1] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - x0 := o0.Args[0] - if x0.Op != Op386MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - s0 := 
o0.Args[1] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBload [i3] {s} p mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBload [i2] {s} p mem)) x0:(MOVWload [i0] {s} p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLload [i0] {s} p mem) - for { - s1 := v.Args[0] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBload { - break - } - i3 := x2.AuxInt - s := x2.Aux - p := x2.Args[0] - mem := x2.Args[1] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBload { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - x0 := o0.Args[1] - if x0.Op != Op386MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != Op386MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - s0 := v.Args[1] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) 
- v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != Op386MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - s0 := v.Args[1] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != Op386MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - s0 := v.Args[1] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != Op386MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - s0 := v.Args[1] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - 
return true - } - // match: (ORL s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) - // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - s0 := v.Args[0] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != Op386MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) - // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - s0 := v.Args[0] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != Op386MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) - // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - s0 := v.Args[0] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != Op386MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL 
s0:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) - // cond: i1==i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - s0 := v.Args[0] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 8 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != Op386MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - if mem != x2.Args[2] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - o0 := v.Args[0] - if o0.Op != Op386ORL 
{ - break - } - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - if mem != x2.Args[2] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - if mem != x2.Args[2] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && 
clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - if mem != x2.Args[2] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - if mem != x2.Args[2] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && 
s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - if mem != x2.Args[2] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - if mem != x2.Args[2] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) 
s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - if mem != x2.Args[2] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - if mem != x2.Args[2] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) 
- return true - } - // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - if mem != x2.Args[2] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - if mem != x2.Args[2] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, 
Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - if mem != x2.Args[2] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - if mem != x2.Args[2] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && 
clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - if mem != x2.Args[2] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - if mem != x2.Args[2] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses 
== 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - o0 := v.Args[0] - if o0.Op != Op386ORL { - break - } - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - s1 := v.Args[1] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - if mem != x2.Args[2] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)) o0:(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - s1 := v.Args[0] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - p := x2.Args[0] - idx := x2.Args[1] - mem := x2.Args[2] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - if p != 
x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - s1 := v.Args[0] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - idx := x2.Args[0] - p := x2.Args[1] - mem := x2.Args[2] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst { - break - } - if s0.AuxInt != 16 { - break - } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { - break - } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)) o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - s1 := v.Args[0] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - p := x2.Args[0] - idx := x2.Args[1] - mem := x2.Args[2] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off1] {sym1} (LEAL2 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: 
(MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386LEAL2 { break } - if idx != x0.Args[0] { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if p != x0.Args[1] { + v.reset(Op386MOVWstoreidx2) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off] {sym} (ADDL ptr idx) val mem) + // cond: ptr.Op != OpSB + // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386ADDL { break } - if mem != x0.Args[2] { + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(ptr.Op != OpSB) { break } - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst { + v.reset(Op386MOVWstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstore [i-2] {s} p w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SHRLconst { break } - if s0.AuxInt != 16 { + if v_1.AuxInt != 16 { break } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != Op386MOVWstore { break } - i2 := x1.AuxInt - if x1.Aux != s { + if x.AuxInt != i-2 { break } - if p != x1.Args[0] { + if x.Aux != s { break } - if idx != x1.Args[1] { + if p != x.Args[0] { break } - if mem != x1.Args[2] { + if w != x.Args[1] { break } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(Op386MOVLstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) + // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstore [i-2] {s} p w0 mem) for { - s1 := v.Args[0] - if s1.Op != Op386SHLLconst { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SHRLconst { break } - if s1.AuxInt != 24 { + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != Op386MOVWstore { break } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { + if x.AuxInt != i-2 { break } - i3 := x2.AuxInt - s := x2.Aux - idx := x2.Args[0] - 
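The MOVWstore rules just above fold a scaled LEAL2 address into an indexed store and, more interestingly, merge a pair of adjacent 16-bit stores of w and w>>16 into one 32-bit store. A small self-contained Go check of the byte-level equivalence the merge relies on (hedged sketch only; 386 is little-endian):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	w := uint32(0xdeadbeef)
	var twoStores, oneStore [4]byte
	// MOVWstore [i-2] w followed by MOVWstore [i] (SHRLconst [16] w) ...
	binary.LittleEndian.PutUint16(twoStores[0:], uint16(w))
	binary.LittleEndian.PutUint16(twoStores[2:], uint16(w>>16))
	// ... writes the same four bytes as a single MOVLstore [i-2] w.
	binary.LittleEndian.PutUint32(oneStore[:], w)
	fmt.Println(twoStores == oneStore) // true
}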
p := x2.Args[1] - mem := x2.Args[2] - o0 := v.Args[1] - if o0.Op != Op386ORL { + if x.Aux != s { break } - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { + if p != x.Args[0] { break } - i0 := x0.AuxInt - if x0.Aux != s { + w0 := x.Args[1] + if w0.Op != Op386SHRLconst { break } - if idx != x0.Args[0] { + if w0.AuxInt != j-16 { break } - if p != x0.Args[1] { + if w != w0.Args[0] { break } - if mem != x0.Args[2] { + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst { + v.reset(Op386MOVLstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVWstoreconst(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + for { + sc := v.AuxInt + s := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { break } - if s0.AuxInt != 16 { + off := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(sc).canAdd(off)) { break } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { + v.reset(Op386MOVWstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) + // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386LEAL { break } - i2 := x1.AuxInt - if x1.Aux != s { + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) { break } - if p != x1.Args[0] { + v.reset(Op386MOVWstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconst [x] {sym1} (LEAL1 [off] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + x := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386LEAL1 { break } - if idx != x1.Args[1] { + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { break } - if mem != x1.Args[2] { + v.reset(Op386MOVWstoreconstidx1) + v.AuxInt = ValAndOff(x).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconst [x] {sym1} (LEAL2 [off] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + x := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386LEAL2 { break } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, 
Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(Op386MOVWstoreconstidx2) + v.AuxInt = ValAndOff(x).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)) o0:(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) + // match: (MOVWstoreconst [x] {sym} (ADDL ptr idx) mem) + // cond: + // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem) for { - s1 := v.Args[0] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { + x := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386ADDL { break } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + v.reset(Op386MOVWstoreconstidx1) + v.AuxInt = x + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) + // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) + for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + x := v.Args[1] + if x.Op != Op386MOVWstoreconst { break } - i3 := x2.AuxInt - s := x2.Aux - p := x2.Args[0] - idx := x2.Args[1] - mem := x2.Args[2] - o0 := v.Args[1] - if o0.Op != Op386ORL { + a := x.AuxInt + if x.Aux != s { break } - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { + if p != x.Args[0] { break } - i0 := x0.AuxInt - if x0.Aux != s { + mem := x.Args[1] + if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { break } - if p != x0.Args[0] { + v.reset(Op386MOVLstoreconst) + v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVWstoreconstidx1(v *Value) bool { + // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLLconst [1] idx) mem) + // cond: + // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SHLLconst { break } - if idx != x0.Args[1] { + if v_1.AuxInt != 1 { break } - if mem != x0.Args[2] { + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVWstoreconstidx2) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconstidx1 [x] {sym} (ADDLconst [c] ptr) idx mem) + // cond: + // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { break } - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst { + c := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(Op386MOVWstoreconstidx1) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDLconst [c] idx) mem) + 
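The MOVWstoreconst merge above packs the two 16-bit constants into a single 32-bit immediate with ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, keeping the lower store's offset. A quick stand-alone check of that packing (hedged sketch; the real ValAndOff also packs the offset into the same AuxInt, which is elided here):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	lo, hi := uint16(0x1234), uint16(0xabcd) // the constants stored at off and off+2
	var viaTwo, viaOne [4]byte
	// two adjacent 16-bit constant stores ...
	binary.LittleEndian.PutUint16(viaTwo[0:], lo)
	binary.LittleEndian.PutUint16(viaTwo[2:], hi)
	// ... equal one 32-bit constant store of lo | hi<<16, as in the merged rule.
	binary.LittleEndian.PutUint32(viaOne[:], uint32(lo)|uint32(hi)<<16)
	fmt.Println(viaTwo == viaOne) // true
}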
// cond: + // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { break } - if s0.AuxInt != 16 { + c := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVWstoreconstidx1) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) + // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) + for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + i := v.Args[1] + x := v.Args[2] + if x.Op != Op386MOVWstoreconstidx1 { break } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { + a := x.AuxInt + if x.Aux != s { break } - i2 := x1.AuxInt - if x1.Aux != s { + if p != x.Args[0] { break } - if idx != x1.Args[0] { + if i != x.Args[1] { break } - if p != x1.Args[1] { + mem := x.Args[2] + if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { break } - if mem != x1.Args[2] { + v.reset(Op386MOVLstoreconstidx1) + v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v.AddArg(i) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVWstoreconstidx2(v *Value) bool { + b := v.Block + _ = b + // match: (MOVWstoreconstidx2 [x] {sym} (ADDLconst [c] ptr) idx mem) + // cond: + // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { break } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + c := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(Op386MOVWstoreconstidx2) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDLconst [c] idx) mem) + // cond: + // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + c := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(Op386MOVWstoreconstidx2) + v.AuxInt = ValAndOff(x).add(2 * c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) + // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) 
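In the *idx2 forms above, the effective address is ptr + 2*idx plus the displacement, which is why folding (ADDLconst [c] idx) into the constant uses add(2*c), while the plain idx1 forms use add(c). A tiny sketch of that address arithmetic, with a hypothetical addrIdx2 helper standing in for the addressing mode:

package main

import "fmt"

// addrIdx2 models the idx2 addressing mode: base + 2*index + displacement (illustrative only).
func addrIdx2(ptr, idx, off int32) int32 { return ptr + 2*idx + off }

func main() {
	ptr, idx, c, off := int32(1000), int32(7), int32(3), int32(4)
	// a store addressed through (ADDLconst [c] idx) at displacement off ...
	a := addrIdx2(ptr, idx+c, off)
	// ... hits the same byte as a store through idx at displacement off+2*c.
	b := addrIdx2(ptr, idx, off+2*c)
	fmt.Println(a == b) // true
}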
+ // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) + // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLLconst [1] i) mem) for { - s1 := v.Args[0] - if s1.Op != Op386SHLLconst { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + i := v.Args[1] + x := v.Args[2] + if x.Op != Op386MOVWstoreconstidx2 { break } - if s1.AuxInt != 24 { + a := x.AuxInt + if x.Aux != s { break } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { + if p != x.Args[0] { break } - i3 := x2.AuxInt - s := x2.Aux - idx := x2.Args[0] - p := x2.Args[1] - mem := x2.Args[2] - o0 := v.Args[1] - if o0.Op != Op386ORL { + if i != x.Args[1] { break } - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { + mem := x.Args[2] + if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(Op386MOVLstoreconstidx1) + v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Pos, Op386SHLLconst, i.Type) + v0.AuxInt = 1 + v0.AddArg(i) + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MOVWstoreidx1(v *Value) bool { + // match: (MOVWstoreidx1 [c] {sym} ptr (SHLLconst [1] idx) val mem) + // cond: + // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SHLLconst { + break + } + if v_1.AuxInt != 1 { break } - if p != x0.Args[0] { + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVWstoreidx2) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx1 [c] {sym} (ADDLconst [d] ptr) idx val mem) + // cond: + // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { break } - if idx != x0.Args[1] { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVWstoreidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx1 [c] {sym} ptr (ADDLconst [d] idx) val mem) + // cond: + // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { break } - if mem != x0.Args[2] { + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVWstoreidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386SHRLconst { break } - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst { + if v_2.AuxInt != 16 { break } - if s0.AuxInt != 16 { + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVWstoreidx1 { break } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { + if x.AuxInt != i-2 { break } - i2 := x1.AuxInt - if x1.Aux != s { + if x.Aux != s { break } - if idx != x1.Args[0] { + if p != x.Args[0] { break } - if p != x1.Args[1] { + if idx != 
x.Args[1] { break } - if mem != x1.Args[2] { + if w != x.Args[2] { break } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(Op386MOVLstoreidx1) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)) o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) + // match: (MOVWstoreidx1 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) for { - s1 := v.Args[0] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - p := x2.Args[0] - idx := x2.Args[1] - mem := x2.Args[2] - o0 := v.Args[1] - if o0.Op != Op386ORL { - break - } - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386SHRLconst { break } - if mem != x0.Args[2] { + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVWstoreidx1 { break } - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst { + if x.AuxInt != i-2 { break } - if s0.AuxInt != 16 { + if x.Aux != s { break } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { + if p != x.Args[0] { break } - i2 := x1.AuxInt - if x1.Aux != s { + if idx != x.Args[1] { break } - if idx != x1.Args[0] { + w0 := x.Args[2] + if w0.Op != Op386SHRLconst { break } - if p != x1.Args[1] { + if w0.AuxInt != j-16 { break } - if mem != x1.Args[2] { + if w != w0.Args[0] { break } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(Op386MOVLstoreidx1) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && 
x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) + return false +} +func rewriteValue386_Op386MOVWstoreidx2(v *Value) bool { + b := v.Block + _ = b + // match: (MOVWstoreidx2 [c] {sym} (ADDLconst [d] ptr) idx val mem) + // cond: + // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) for { - s1 := v.Args[0] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - idx := x2.Args[0] - p := x2.Args[1] - mem := x2.Args[2] - o0 := v.Args[1] - if o0.Op != Op386ORL { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != Op386ADDLconst { break } - x0 := o0.Args[0] - if x0.Op != Op386MOVWloadidx1 { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVWstoreidx2) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx2 [c] {sym} ptr (ADDLconst [d] idx) val mem) + // cond: + // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386ADDLconst { break } - i0 := x0.AuxInt - if x0.Aux != s { + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(Op386MOVWstoreidx2) + v.AuxInt = c + 2*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstoreidx1 [i-2] {s} p (SHLLconst [1] idx) w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386SHRLconst { break } - if idx != x0.Args[0] { + if v_2.AuxInt != 16 { break } - if p != x0.Args[1] { + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVWstoreidx2 { break } - if mem != x0.Args[2] { + if x.AuxInt != i-2 { break } - s0 := o0.Args[1] - if s0.Op != Op386SHLLconst { + if x.Aux != s { break } - if s0.AuxInt != 16 { + if p != x.Args[0] { break } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { + if idx != x.Args[1] { break } - i2 := x1.AuxInt - if x1.Aux != s { + if w != x.Args[2] { break } - if idx != x1.Args[0] { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - if p != x1.Args[1] { + v.reset(Op386MOVLstoreidx1) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Pos, Op386SHLLconst, idx.Type) + v0.AuxInt = 1 + v0.AddArg(idx) + v.AddArg(v0) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx2 [i] {s} p idx (SHRLconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRLconst [j-16] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstoreidx1 [i-2] {s} p (SHLLconst [1] idx) w0 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != Op386SHRLconst { break } - if mem != x1.Args[2] { + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != Op386MOVWstoreidx2 { break } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && 
clobber(s0) && clobber(s1) && clobber(o0)) { + if x.AuxInt != i-2 { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) - for { - s1 := v.Args[0] - if s1.Op != Op386SHLLconst { + if x.Aux != s { break } - if s1.AuxInt != 24 { + if p != x.Args[0] { break } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { + if idx != x.Args[1] { break } - i3 := x2.AuxInt - s := x2.Aux - p := x2.Args[0] - idx := x2.Args[1] - mem := x2.Args[2] - o0 := v.Args[1] - if o0.Op != Op386ORL { + w0 := x.Args[2] + if w0.Op != Op386SHRLconst { break } - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst { + if w0.AuxInt != j-16 { break } - if s0.AuxInt != 16 { + if w != w0.Args[0] { break } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - i2 := x1.AuxInt - if x1.Aux != s { + v.reset(Op386MOVLstoreidx1) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Pos, Op386SHLLconst, idx.Type) + v0.AuxInt = 1 + v0.AddArg(idx) + v.AddArg(v0) + v.AddArg(w0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValue386_Op386MULL(v *Value) bool { + // match: (MULL x (MOVLconst [c])) + // cond: + // result: (MULLconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386MOVLconst { break } - if p != x1.Args[0] { + c := v_1.AuxInt + v.reset(Op386MULLconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MULL (MOVLconst [c]) x) + // cond: + // result: (MULLconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != Op386MOVLconst { break } - if idx != x1.Args[1] { + c := v_0.AuxInt + x := v.Args[1] + v.reset(Op386MULLconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValue386_Op386MULLconst(v *Value) bool { + b := v.Block + _ = b + // match: (MULLconst [c] (MULLconst [d] x)) + // cond: + // result: (MULLconst [int64(int32(c * d))] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != Op386MULLconst { break } - if mem != x1.Args[2] { + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(Op386MULLconst) + v.AuxInt = int64(int32(c * d)) + v.AddArg(x) + return true + } + // match: (MULLconst [-1] x) + // cond: + // result: (NEGL x) + for { + if v.AuxInt != -1 { break } - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { + x := v.Args[0] + v.reset(Op386NEGL) + v.AddArg(x) + return true + } + // match: (MULLconst [0] _) + // cond: + // result: (MOVLconst [0]) + for { + if v.AuxInt != 0 { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(Op386MOVLconst) + v.AuxInt = 0 + return true + } + // match: (MULLconst [1] x) + // cond: + // result: x + for { + if v.AuxInt != 1 { break } - if p != x0.Args[0] { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MULLconst [3] x) + // cond: + // result: (LEAL2 x x) + for { + if v.AuxInt != 3 { break } - if idx != x0.Args[1] { + x := 
v.Args[0] + v.reset(Op386LEAL2) + v.AddArg(x) + v.AddArg(x) + return true + } + // match: (MULLconst [5] x) + // cond: + // result: (LEAL4 x x) + for { + if v.AuxInt != 5 { break } - if mem != x0.Args[2] { + x := v.Args[0] + v.reset(Op386LEAL4) + v.AddArg(x) + v.AddArg(x) + return true + } + // match: (MULLconst [7] x) + // cond: + // result: (LEAL8 (NEGL x) x) + for { + if v.AuxInt != 7 { break } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + x := v.Args[0] + v.reset(Op386LEAL8) + v0 := b.NewValue0(v.Pos, Op386NEGL, v.Type) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULLconst [9] x) + // cond: + // result: (LEAL8 x x) + for { + if v.AuxInt != 9 { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + x := v.Args[0] + v.reset(Op386LEAL8) + v.AddArg(x) + v.AddArg(x) return true } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) + // match: (MULLconst [11] x) + // cond: + // result: (LEAL2 x (LEAL4 x x)) for { - s1 := v.Args[0] - if s1.Op != Op386SHLLconst { + if v.AuxInt != 11 { break } - if s1.AuxInt != 24 { + x := v.Args[0] + v.reset(Op386LEAL2) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULLconst [13] x) + // cond: + // result: (LEAL4 x (LEAL2 x x)) + for { + if v.AuxInt != 13 { break } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { + x := v.Args[0] + v.reset(Op386LEAL4) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULLconst [21] x) + // cond: + // result: (LEAL4 x (LEAL4 x x)) + for { + if v.AuxInt != 21 { break } - i3 := x2.AuxInt - s := x2.Aux - idx := x2.Args[0] - p := x2.Args[1] - mem := x2.Args[2] - o0 := v.Args[1] - if o0.Op != Op386ORL { + x := v.Args[0] + v.reset(Op386LEAL4) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULLconst [25] x) + // cond: + // result: (LEAL8 x (LEAL2 x x)) + for { + if v.AuxInt != 25 { break } - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst { + x := v.Args[0] + v.reset(Op386LEAL8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULLconst [37] x) + // cond: + // result: (LEAL4 x (LEAL8 x x)) + for { + if v.AuxInt != 37 { break } - if s0.AuxInt != 16 { + x := v.Args[0] + v.reset(Op386LEAL4) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULLconst [41] x) + // cond: + // result: (LEAL8 x (LEAL4 x x)) + for { + if v.AuxInt != 41 { break } - x1 := s0.Args[0] - if x1.Op 
!= Op386MOVBloadidx1 { + x := v.Args[0] + v.reset(Op386LEAL8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULLconst [73] x) + // cond: + // result: (LEAL8 x (LEAL8 x x)) + for { + if v.AuxInt != 73 { break } - i2 := x1.AuxInt - if x1.Aux != s { + x := v.Args[0] + v.reset(Op386LEAL8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULLconst [c] x) + // cond: isPowerOfTwo(c) + // result: (SHLLconst [log2(c)] x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c)) { break } - if p != x1.Args[0] { + v.reset(Op386SHLLconst) + v.AuxInt = log2(c) + v.AddArg(x) + return true + } + // match: (MULLconst [c] x) + // cond: isPowerOfTwo(c+1) && c >= 15 + // result: (SUBL (SHLLconst [log2(c+1)] x) x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c+1) && c >= 15) { break } - if idx != x1.Args[1] { + v.reset(Op386SUBL) + v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) + v0.AuxInt = log2(c + 1) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULLconst [c] x) + // cond: isPowerOfTwo(c-1) && c >= 17 + // result: (LEAL1 (SHLLconst [log2(c-1)] x) x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c-1) && c >= 17) { break } - if mem != x1.Args[2] { + v.reset(Op386LEAL1) + v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULLconst [c] x) + // cond: isPowerOfTwo(c-2) && c >= 34 + // result: (LEAL2 (SHLLconst [log2(c-2)] x) x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c-2) && c >= 34) { break } - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { + v.reset(Op386LEAL2) + v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) + v0.AuxInt = log2(c - 2) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULLconst [c] x) + // cond: isPowerOfTwo(c-4) && c >= 68 + // result: (LEAL4 (SHLLconst [log2(c-4)] x) x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c-4) && c >= 68) { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(Op386LEAL4) + v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) + v0.AuxInt = log2(c - 4) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULLconst [c] x) + // cond: isPowerOfTwo(c-8) && c >= 136 + // result: (LEAL8 (SHLLconst [log2(c-8)] x) x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c-8) && c >= 136) { break } - if p != x0.Args[0] { + v.reset(Op386LEAL8) + v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type) + v0.AuxInt = log2(c - 8) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULLconst [c] x) + // cond: c%3 == 0 && isPowerOfTwo(c/3) + // result: (SHLLconst [log2(c/3)] (LEAL2 x x)) + for { + c := v.AuxInt + x := v.Args[0] + if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } - if idx != x0.Args[1] { + v.reset(Op386SHLLconst) + v.AuxInt = log2(c / 3) + v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULLconst [c] x) + // cond: c%5 == 0 && isPowerOfTwo(c/5) + // result: (SHLLconst [log2(c/5)] (LEAL4 x x)) + for { + c := v.AuxInt + x := v.Args[0] + if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } - if mem != x0.Args[2] { + v.reset(Op386SHLLconst) + v.AuxInt = log2(c / 5) + v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + 
return true + } + // match: (MULLconst [c] x) + // cond: c%9 == 0 && isPowerOfTwo(c/9) + // result: (SHLLconst [log2(c/9)] (LEAL8 x x)) + for { + c := v.AuxInt + x := v.Args[0] + if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + v.reset(Op386SHLLconst) + v.AuxInt = log2(c / 9) + v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULLconst [c] (MOVLconst [d])) + // cond: + // result: (MOVLconst [int64(int32(c*d))]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != Op386MOVLconst { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + d := v_0.AuxInt + v.reset(Op386MOVLconst) + v.AuxInt = int64(int32(c * d)) return true } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) + return false +} +func rewriteValue386_Op386NEGL(v *Value) bool { + // match: (NEGL (MOVLconst [c])) + // cond: + // result: (MOVLconst [int64(int32(-c))]) for { - s1 := v.Args[0] - if s1.Op != Op386SHLLconst { + v_0 := v.Args[0] + if v_0.Op != Op386MOVLconst { break } - if s1.AuxInt != 24 { + c := v_0.AuxInt + v.reset(Op386MOVLconst) + v.AuxInt = int64(int32(-c)) + return true + } + return false +} +func rewriteValue386_Op386NOTL(v *Value) bool { + // match: (NOTL (MOVLconst [c])) + // cond: + // result: (MOVLconst [^c]) + for { + v_0 := v.Args[0] + if v_0.Op != Op386MOVLconst { break } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { + c := v_0.AuxInt + v.reset(Op386MOVLconst) + v.AuxInt = ^c + return true + } + return false +} +func rewriteValue386_Op386ORL(v *Value) bool { + b := v.Block + _ = b + types := &b.Func.Config.Types + _ = types + // match: (ORL x (MOVLconst [c])) + // cond: + // result: (ORLconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386MOVLconst { break } - i3 := x2.AuxInt - s := x2.Aux - p := x2.Args[0] - idx := x2.Args[1] - mem := x2.Args[2] - o0 := v.Args[1] - if o0.Op != Op386ORL { + c := v_1.AuxInt + v.reset(Op386ORLconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ORL (MOVLconst [c]) x) + // cond: + // result: (ORLconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != Op386MOVLconst { break } - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst { + c := v_0.AuxInt + x := v.Args[1] + v.reset(Op386ORLconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: ( ORL (SHLLconst [c] x) (SHRLconst [32-c] x)) + // cond: + // result: (ROLLconst [c ] x) + for { + v_0 := v.Args[0] + if v_0.Op != Op386SHLLconst { break } - if s0.AuxInt != 16 { + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SHRLconst { break } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { + if v_1.AuxInt != 32-c { break } - i2 := 
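The MULLconst rules above strength-reduce multiplication by small or structured constants into LEAL/SHLL/SUBL combinations: LEAL2 x x computes x + 2*x = 3*x, LEAL8 (NEGL x) x is 7*x, a power-of-two factor becomes a shift, and c = 3*2^k becomes a shift of LEAL2 x x. A short Go sketch checking a few of these identities (the leal* helpers are stand-ins for the addressing modes, not compiler code):

package main

import "fmt"

// x86 LEA with scale k computes base + k*index; these mirror that shape (illustrative only).
func leal1(b, i int32) int32 { return b + i }
func leal2(b, i int32) int32 { return b + 2*i }
func leal4(b, i int32) int32 { return b + 4*i }
func leal8(b, i int32) int32 { return b + 8*i }

func main() {
	x := int32(12345)
	fmt.Println(leal2(x, x) == 3*x)            // MULLconst [3]  -> (LEAL2 x x)
	fmt.Println(leal8(-x, x) == 7*x)           // MULLconst [7]  -> (LEAL8 (NEGL x) x)
	fmt.Println(leal8(x, x) == 9*x)            // MULLconst [9]  -> (LEAL8 x x)
	fmt.Println(leal4(x, leal2(x, x)) == 13*x) // MULLconst [13] -> (LEAL4 x (LEAL2 x x))
	fmt.Println(leal1(x<<4, x) == 17*x)        // isPowerOfTwo(c-1): (LEAL1 (SHLLconst [log2(c-1)] x) x)
	fmt.Println(x<<5-x == 31*x)                // isPowerOfTwo(c+1): (SUBL (SHLLconst [log2(c+1)] x) x)
	fmt.Println(leal2(x, x)<<2 == 12*x)        // c%3==0 && isPowerOfTwo(c/3): (SHLLconst [log2(c/3)] (LEAL2 x x))
}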
x1.AuxInt - if x1.Aux != s { + if x != v_1.Args[0] { break } - if idx != x1.Args[0] { + v.reset(Op386ROLLconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: ( ORL (SHRLconst [c] x) (SHLLconst [32-c] x)) + // cond: + // result: (ROLLconst [32-c] x) + for { + v_0 := v.Args[0] + if v_0.Op != Op386SHRLconst { break } - if p != x1.Args[1] { + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SHLLconst { break } - if mem != x1.Args[2] { + if v_1.AuxInt != 32-c { break } - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { + if x != v_1.Args[0] { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(Op386ROLLconst) + v.AuxInt = 32 - c + v.AddArg(x) + return true + } + // match: ( ORL (SHLLconst x [c]) (SHRWconst x [16-c])) + // cond: c < 16 && t.Size() == 2 + // result: (ROLWconst x [ c]) + for { + t := v.Type + v_0 := v.Args[0] + if v_0.Op != Op386SHLLconst { break } - if p != x0.Args[0] { + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SHRWconst { break } - if idx != x0.Args[1] { + if v_1.AuxInt != 16-c { break } - if mem != x0.Args[2] { + if x != v_1.Args[0] { break } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + if !(c < 16 && t.Size() == 2) { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(Op386ROLWconst) + v.AuxInt = c + v.AddArg(x) return true } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) + // match: ( ORL (SHRWconst x [c]) (SHLLconst x [16-c])) + // cond: c > 0 && t.Size() == 2 + // result: (ROLWconst x [16-c]) for { - s1 := v.Args[0] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { + t := v.Type + v_0 := v.Args[0] + if v_0.Op != Op386SHRWconst { break } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SHLLconst { break } - i3 := x2.AuxInt - s := x2.Aux - idx := x2.Args[0] - p := x2.Args[1] - mem := x2.Args[2] - o0 := v.Args[1] - if o0.Op != Op386ORL { + if v_1.AuxInt != 16-c { break } - s0 := o0.Args[0] - if s0.Op != Op386SHLLconst { + if x != v_1.Args[0] { break } - if s0.AuxInt != 16 { + if !(c > 0 && t.Size() == 2) { break } - x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { + v.reset(Op386ROLWconst) + v.AuxInt = 16 - c + v.AddArg(x) + return true + } + // match: ( ORL (SHLLconst x [c]) (SHRBconst x [ 8-c])) + // cond: c < 8 && t.Size() == 1 + // result: (ROLBconst x [ c]) + for { + t := v.Type + v_0 := v.Args[0] + if v_0.Op != Op386SHLLconst { break } - i2 := x1.AuxInt - if x1.Aux != s { + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SHRBconst { break } - if idx != x1.Args[0] { + if v_1.AuxInt != 8-c { break } - if p != x1.Args[1] { + if x != v_1.Args[0] { break } - 
if mem != x1.Args[2] { + if !(c < 8 && t.Size() == 1) { break } - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { + v.reset(Op386ROLBconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: ( ORL (SHRBconst x [c]) (SHLLconst x [ 8-c])) + // cond: c > 0 && t.Size() == 1 + // result: (ROLBconst x [ 8-c]) + for { + t := v.Type + v_0 := v.Args[0] + if v_0.Op != Op386SHRBconst { break } - i0 := x0.AuxInt - if x0.Aux != s { + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != Op386SHLLconst { break } - if p != x0.Args[0] { + if v_1.AuxInt != 8-c { break } - if idx != x0.Args[1] { + if x != v_1.Args[0] { break } - if mem != x0.Args[2] { + if !(c > 0 && t.Size() == 1) { break } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + v.reset(Op386ROLBconst) + v.AuxInt = 8 - c + v.AddArg(x) + return true + } + // match: (ORL x x) + // cond: + // result: x + for { + x := v.Args[0] + if x != v.Args[1] { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.Type = x.Type + v.AddArg(x) return true } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) + // match: (ORL x:(SHLLconst _) y) + // cond: y.Op != Op386SHLLconst + // result: (ORL y x) for { - s1 := v.Args[0] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { + x := v.Args[0] + if x.Op != Op386SHLLconst { break } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { + y := v.Args[1] + if !(y.Op != Op386SHLLconst) { break } - i3 := x2.AuxInt - s := x2.Aux - p := x2.Args[0] - idx := x2.Args[1] - mem := x2.Args[2] - o0 := v.Args[1] - if o0.Op != Op386ORL { + v.reset(Op386ORL) + v.AddArg(y) + v.AddArg(x) + return true + } + // match: (ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) + // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) + for { + x0 := v.Args[0] + if x0.Op != Op386MOVBload { break } - s0 := o0.Args[0] + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := v.Args[1] if s0.Op != Op386SHLLconst { break } - if s0.AuxInt != 16 { + if s0.AuxInt != 8 { break } x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { - break - } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { + if x1.Op != Op386MOVBload { break } - i0 := x0.AuxInt - if x0.Aux != s { + if x1.AuxInt != i+1 { break } - if idx != x0.Args[0] { + if x1.Aux != s { break } - if p != x0.Args[1] { + if p != x1.Args[0] { break } - if mem != x0.Args[2] { + if mem != x1.Args[1] { break } - if !(i2 == i0+2 && i3 
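The ORL rules above recognize x<<c | x>>(32-c) as a rotate and rewrite it to ROLLconst, with 16- and 8-bit variants additionally gated on t.Size() so the narrower ROLWconst/ROLBconst are only used when the value really is that wide. A minimal sketch of the identity, using math/bits for the reference result (illustrative only):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x, c := uint32(0x12345678), uint(5)
	// the ORL-of-opposite-shifts shape matched by the rule ...
	orOfShifts := x<<c | x>>(32-c)
	// ... is exactly a left rotate by c, which the rewrite emits as ROLLconst [c].
	fmt.Println(orOfShifts == bits.RotateLeft32(x, int(c))) // true

	// the 16-bit variant is only valid for genuinely 16-bit values, hence the t.Size() check
	w := uint16(0xbeef)
	fmt.Println(w<<3|w>>13 == bits.RotateLeft16(w, 3)) // true
}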
== i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, Op386MOVWload, types.UInt16) v.reset(OpCopy) v.AddArg(v0) - v0.AuxInt = i0 + v0.AuxInt = i v0.Aux = s v0.AddArg(p) - v0.AddArg(idx) v0.AddArg(mem) return true } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) + // match: (ORL o0:(ORL x0:(MOVWload [i] {s} p mem) s0:(SHLLconst [16] x1:(MOVBload [i+2] {s} p mem))) s1:(SHLLconst [24] x2:(MOVBload [i+3] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p mem) for { - s1 := v.Args[0] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { + o0 := v.Args[0] + if o0.Op != Op386ORL { break } - i3 := x2.AuxInt - s := x2.Aux - idx := x2.Args[0] - p := x2.Args[1] - mem := x2.Args[2] - o0 := v.Args[1] - if o0.Op != Op386ORL { + x0 := o0.Args[0] + if x0.Op != Op386MOVWload { break } - s0 := o0.Args[0] + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := o0.Args[1] if s0.Op != Op386SHLLconst { break } @@ -11657,157 +7512,128 @@ func rewriteValue386_Op386ORL(v *Value) bool { break } x1 := s0.Args[0] - if x1.Op != Op386MOVBloadidx1 { + if x1.Op != Op386MOVBload { + break + } + if x1.AuxInt != i+2 { break } - i2 := x1.AuxInt if x1.Aux != s { break } if p != x1.Args[0] { break } - if idx != x1.Args[1] { + if mem != x1.Args[1] { break } - if mem != x1.Args[2] { + s1 := v.Args[1] + if s1.Op != Op386SHLLconst { break } - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { + if s1.AuxInt != 24 { + break + } + x2 := s1.Args[0] + if x2.Op != Op386MOVBload { break } - i0 := x0.AuxInt - if x0.Aux != s { + if x2.AuxInt != i+3 { break } - if idx != x0.Args[0] { + if x2.Aux != s { break } - if p != x0.Args[1] { + if p != x2.Args[0] { break } - if mem != x0.Args[2] { + if mem != x2.Args[1] { break } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { break } b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) + v0 := 
b.NewValue0(v.Pos, Op386MOVLload, types.UInt32) v.reset(OpCopy) v.AddArg(v0) - v0.AuxInt = i0 + v0.AuxInt = i v0.Aux = s v0.AddArg(p) - v0.AddArg(idx) v0.AddArg(mem) return true } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} p idx mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) + // match: (ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) + // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i] {s} p idx mem) for { - s1 := v.Args[0] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { - break - } - i3 := x2.AuxInt - s := x2.Aux - p := x2.Args[0] - idx := x2.Args[1] - mem := x2.Args[2] - o0 := v.Args[1] - if o0.Op != Op386ORL { + x0 := v.Args[0] + if x0.Op != Op386MOVBloadidx1 { break } - s0 := o0.Args[0] + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + s0 := v.Args[1] if s0.Op != Op386SHLLconst { break } - if s0.AuxInt != 16 { + if s0.AuxInt != 8 { break } x1 := s0.Args[0] if x1.Op != Op386MOVBloadidx1 { break } - i2 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { + if x1.AuxInt != i+1 { break } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { + if x1.Aux != s { break } - if idx != x0.Args[0] { + if p != x1.Args[0] { break } - if p != x0.Args[1] { + if idx != x1.Args[1] { break } - if mem != x0.Args[2] { + if mem != x1.Args[2] { break } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { break } - b = mergePoint(b, x0, x1, x2) - v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, Op386MOVWloadidx1, v.Type) v.reset(OpCopy) v.AddArg(v0) - v0.AuxInt = i0 + v0.AuxInt = i v0.Aux = s v0.AddArg(p) v0.AddArg(idx) v0.AddArg(mem) return true } - // match: (ORL s1:(SHLLconst [24] x2:(MOVBloadidx1 [i3] {s} idx p mem)) o0:(ORL s0:(SHLLconst [16] x1:(MOVBloadidx1 [i2] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i0] {s} p idx mem) + // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i+2] {s} p idx mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i+3] {s} p idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 
&& s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i] {s} p idx mem) for { - s1 := v.Args[0] - if s1.Op != Op386SHLLconst { - break - } - if s1.AuxInt != 24 { - break - } - x2 := s1.Args[0] - if x2.Op != Op386MOVBloadidx1 { + o0 := v.Args[0] + if o0.Op != Op386ORL { break } - i3 := x2.AuxInt - s := x2.Aux - idx := x2.Args[0] - p := x2.Args[1] - mem := x2.Args[2] - o0 := v.Args[1] - if o0.Op != Op386ORL { + x0 := o0.Args[0] + if x0.Op != Op386MOVWloadidx1 { break } - s0 := o0.Args[0] + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + s0 := o0.Args[1] if s0.Op != Op386SHLLconst { break } @@ -11818,44 +7644,55 @@ func rewriteValue386_Op386ORL(v *Value) bool { if x1.Op != Op386MOVBloadidx1 { break } - i2 := x1.AuxInt + if x1.AuxInt != i+2 { + break + } if x1.Aux != s { break } - if idx != x1.Args[0] { + if p != x1.Args[0] { break } - if p != x1.Args[1] { + if idx != x1.Args[1] { break } if mem != x1.Args[2] { break } - x0 := o0.Args[1] - if x0.Op != Op386MOVWloadidx1 { + s1 := v.Args[1] + if s1.Op != Op386SHLLconst { + break + } + if s1.AuxInt != 24 { + break + } + x2 := s1.Args[0] + if x2.Op != Op386MOVBloadidx1 { + break + } + if x2.AuxInt != i+3 { break } - i0 := x0.AuxInt - if x0.Aux != s { + if x2.Aux != s { break } - if idx != x0.Args[0] { + if p != x2.Args[0] { break } - if p != x0.Args[1] { + if idx != x2.Args[1] { break } - if mem != x0.Args[2] { + if mem != x2.Args[2] { break } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { break } b = mergePoint(b, x0, x1, x2) v0 := b.NewValue0(v.Pos, Op386MOVLloadidx1, v.Type) v.reset(OpCopy) v.AddArg(v0) - v0.AuxInt = i0 + v0.AuxInt = i v0.Aux = s v0.AddArg(p) v0.AddArg(idx) @@ -13343,9 +9180,9 @@ func rewriteValue386_Op386XORL(v *Value) bool { v.AddArg(x) return true } - // match: (XORL (SHLLconst [c] x) (SHRLconst [d] x)) - // cond: d == 32-c - // result: (ROLLconst [c] x) + // match: (XORL (SHLLconst [c] x) (SHRLconst [32-c] x)) + // cond: + // result: (ROLLconst [c ] x) for { v_0 := v.Args[0] if v_0.Op != Op386SHLLconst { @@ -13357,11 +9194,10 @@ func rewriteValue386_Op386XORL(v *Value) bool { if v_1.Op != Op386SHRLconst { break } - d := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 32-c { break } - if !(d == 32-c) { + if x != v_1.Args[0] { break } v.reset(Op386ROLLconst) @@ -13369,35 +9205,34 @@ func rewriteValue386_Op386XORL(v *Value) bool { v.AddArg(x) return true } - // match: (XORL (SHRLconst [d] x) (SHLLconst [c] x)) - // cond: d == 32-c - // result: (ROLLconst [c] x) + // match: (XORL (SHRLconst [c] x) (SHLLconst [32-c] x)) + // cond: + // result: (ROLLconst [32-c] x) for { v_0 := v.Args[0] if v_0.Op != Op386SHRLconst { break } - d := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] v_1 := v.Args[1] if v_1.Op != Op386SHLLconst { break } - c := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 32-c { break } - if !(d == 32-c) { + if x != v_1.Args[0] { break } v.reset(Op386ROLLconst) - v.AuxInt = c + 
v.AuxInt = 32 - c v.AddArg(x) return true } - // match: (XORL (SHLLconst x [c]) (SHRWconst x [d])) - // cond: c < 16 && d == 16-c && t.Size() == 2 - // result: (ROLWconst x [c]) + // match: (XORL (SHLLconst x [c]) (SHRWconst x [16-c])) + // cond: c < 16 && t.Size() == 2 + // result: (ROLWconst x [ c]) for { t := v.Type v_0 := v.Args[0] @@ -13410,11 +9245,13 @@ func rewriteValue386_Op386XORL(v *Value) bool { if v_1.Op != Op386SHRWconst { break } - d := v_1.AuxInt + if v_1.AuxInt != 16-c { + break + } if x != v_1.Args[0] { break } - if !(c < 16 && d == 16-c && t.Size() == 2) { + if !(c < 16 && t.Size() == 2) { break } v.reset(Op386ROLWconst) @@ -13422,36 +9259,38 @@ func rewriteValue386_Op386XORL(v *Value) bool { v.AddArg(x) return true } - // match: (XORL (SHRWconst x [d]) (SHLLconst x [c])) - // cond: c < 16 && d == 16-c && t.Size() == 2 - // result: (ROLWconst x [c]) + // match: (XORL (SHRWconst x [c]) (SHLLconst x [16-c])) + // cond: c > 0 && t.Size() == 2 + // result: (ROLWconst x [16-c]) for { t := v.Type v_0 := v.Args[0] if v_0.Op != Op386SHRWconst { break } - d := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] v_1 := v.Args[1] if v_1.Op != Op386SHLLconst { break } - c := v_1.AuxInt + if v_1.AuxInt != 16-c { + break + } if x != v_1.Args[0] { break } - if !(c < 16 && d == 16-c && t.Size() == 2) { + if !(c > 0 && t.Size() == 2) { break } v.reset(Op386ROLWconst) - v.AuxInt = c + v.AuxInt = 16 - c v.AddArg(x) return true } - // match: (XORL (SHLLconst x [c]) (SHRBconst x [d])) - // cond: c < 8 && d == 8-c && t.Size() == 1 - // result: (ROLBconst x [c]) + // match: (XORL (SHLLconst x [c]) (SHRBconst x [ 8-c])) + // cond: c < 8 && t.Size() == 1 + // result: (ROLBconst x [ c]) for { t := v.Type v_0 := v.Args[0] @@ -13464,11 +9303,13 @@ func rewriteValue386_Op386XORL(v *Value) bool { if v_1.Op != Op386SHRBconst { break } - d := v_1.AuxInt + if v_1.AuxInt != 8-c { + break + } if x != v_1.Args[0] { break } - if !(c < 8 && d == 8-c && t.Size() == 1) { + if !(c < 8 && t.Size() == 1) { break } v.reset(Op386ROLBconst) @@ -13476,30 +9317,32 @@ func rewriteValue386_Op386XORL(v *Value) bool { v.AddArg(x) return true } - // match: (XORL (SHRBconst x [d]) (SHLLconst x [c])) - // cond: c < 8 && d == 8-c && t.Size() == 1 - // result: (ROLBconst x [c]) + // match: (XORL (SHRBconst x [c]) (SHLLconst x [ 8-c])) + // cond: c > 0 && t.Size() == 1 + // result: (ROLBconst x [ 8-c]) for { t := v.Type v_0 := v.Args[0] if v_0.Op != Op386SHRBconst { break } - d := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] v_1 := v.Args[1] if v_1.Op != Op386SHLLconst { break } - c := v_1.AuxInt + if v_1.AuxInt != 8-c { + break + } if x != v_1.Args[0] { break } - if !(c < 8 && d == 8-c && t.Size() == 1) { + if !(c > 0 && t.Size() == 1) { break } v.reset(Op386ROLBconst) - v.AuxInt = c + v.AuxInt = 8 - c v.AddArg(x) return true } @@ -13565,7 +9408,7 @@ func rewriteValue386_Op386XORLconst(v *Value) bool { return false } func rewriteValue386_OpAdd16(v *Value) bool { - // match: (Add16 x y) + // match: (Add16 x y) // cond: // result: (ADDL x y) for { @@ -13578,7 +9421,7 @@ func rewriteValue386_OpAdd16(v *Value) bool { } } func rewriteValue386_OpAdd32(v *Value) bool { - // match: (Add32 x y) + // match: (Add32 x y) // cond: // result: (ADDL x y) for { @@ -13645,7 +9488,7 @@ func rewriteValue386_OpAdd64F(v *Value) bool { } } func rewriteValue386_OpAdd8(v *Value) bool { - // match: (Add8 x y) + // match: (Add8 x y) // cond: // result: (ADDL x y) for { @@ -13710,7 +9553,7 @@ func rewriteValue386_OpAnd32(v *Value) bool { } } func 
rewriteValue386_OpAnd8(v *Value) bool { - // match: (And8 x y) + // match: (And8 x y) // cond: // result: (ANDL x y) for { @@ -13799,7 +9642,7 @@ func rewriteValue386_OpCom32(v *Value) bool { } } func rewriteValue386_OpCom8(v *Value) bool { - // match: (Com8 x) + // match: (Com8 x) // cond: // result: (NOTL x) for { @@ -13810,7 +9653,7 @@ func rewriteValue386_OpCom8(v *Value) bool { } } func rewriteValue386_OpConst16(v *Value) bool { - // match: (Const16 [val]) + // match: (Const16 [val]) // cond: // result: (MOVLconst [val]) for { @@ -13821,7 +9664,7 @@ func rewriteValue386_OpConst16(v *Value) bool { } } func rewriteValue386_OpConst32(v *Value) bool { - // match: (Const32 [val]) + // match: (Const32 [val]) // cond: // result: (MOVLconst [val]) for { @@ -13854,7 +9697,7 @@ func rewriteValue386_OpConst64F(v *Value) bool { } } func rewriteValue386_OpConst8(v *Value) bool { - // match: (Const8 [val]) + // match: (Const8 [val]) // cond: // result: (MOVLconst [val]) for { @@ -13967,7 +9810,7 @@ func rewriteValue386_OpCvt64Fto32F(v *Value) bool { } } func rewriteValue386_OpDiv16(v *Value) bool { - // match: (Div16 x y) + // match: (Div16 x y) // cond: // result: (DIVW x y) for { @@ -13993,7 +9836,7 @@ func rewriteValue386_OpDiv16u(v *Value) bool { } } func rewriteValue386_OpDiv32(v *Value) bool { - // match: (Div32 x y) + // match: (Div32 x y) // cond: // result: (DIVL x y) for { @@ -14049,7 +9892,7 @@ func rewriteValue386_OpDiv8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Div8 x y) + // match: (Div8 x y) // cond: // result: (DIVW (SignExt8to16 x) (SignExt8to16 y)) for { @@ -14070,7 +9913,7 @@ func rewriteValue386_OpDiv8u(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Div8u x y) + // match: (Div8u x y) // cond: // result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)) for { @@ -14089,7 +9932,7 @@ func rewriteValue386_OpDiv8u(v *Value) bool { func rewriteValue386_OpEq16(v *Value) bool { b := v.Block _ = b - // match: (Eq16 x y) + // match: (Eq16 x y) // cond: // result: (SETEQ (CMPW x y)) for { @@ -14106,7 +9949,7 @@ func rewriteValue386_OpEq16(v *Value) bool { func rewriteValue386_OpEq32(v *Value) bool { b := v.Block _ = b - // match: (Eq32 x y) + // match: (Eq32 x y) // cond: // result: (SETEQ (CMPL x y)) for { @@ -14157,7 +10000,7 @@ func rewriteValue386_OpEq64F(v *Value) bool { func rewriteValue386_OpEq8(v *Value) bool { b := v.Block _ = b - // match: (Eq8 x y) + // match: (Eq8 x y) // cond: // result: (SETEQ (CMPB x y)) for { @@ -14174,7 +10017,7 @@ func rewriteValue386_OpEq8(v *Value) bool { func rewriteValue386_OpEqB(v *Value) bool { b := v.Block _ = b - // match: (EqB x y) + // match: (EqB x y) // cond: // result: (SETEQ (CMPB x y)) for { @@ -14208,7 +10051,7 @@ func rewriteValue386_OpEqPtr(v *Value) bool { func rewriteValue386_OpGeq16(v *Value) bool { b := v.Block _ = b - // match: (Geq16 x y) + // match: (Geq16 x y) // cond: // result: (SETGE (CMPW x y)) for { @@ -14242,7 +10085,7 @@ func rewriteValue386_OpGeq16U(v *Value) bool { func rewriteValue386_OpGeq32(v *Value) bool { b := v.Block _ = b - // match: (Geq32 x y) + // match: (Geq32 x y) // cond: // result: (SETGE (CMPL x y)) for { @@ -14310,7 +10153,7 @@ func rewriteValue386_OpGeq64F(v *Value) bool { func rewriteValue386_OpGeq8(v *Value) bool { b := v.Block _ = b - // match: (Geq8 x y) + // match: (Geq8 x y) // cond: // result: (SETGE (CMPB x y)) for { @@ -14327,7 +10170,7 @@ func rewriteValue386_OpGeq8(v *Value) bool { func rewriteValue386_OpGeq8U(v *Value) bool { b := v.Block 
_ = b - // match: (Geq8U x y) + // match: (Geq8U x y) // cond: // result: (SETAE (CMPB x y)) for { @@ -14364,7 +10207,7 @@ func rewriteValue386_OpGetG(v *Value) bool { func rewriteValue386_OpGreater16(v *Value) bool { b := v.Block _ = b - // match: (Greater16 x y) + // match: (Greater16 x y) // cond: // result: (SETG (CMPW x y)) for { @@ -14398,7 +10241,7 @@ func rewriteValue386_OpGreater16U(v *Value) bool { func rewriteValue386_OpGreater32(v *Value) bool { b := v.Block _ = b - // match: (Greater32 x y) + // match: (Greater32 x y) // cond: // result: (SETG (CMPL x y)) for { @@ -14466,7 +10309,7 @@ func rewriteValue386_OpGreater64F(v *Value) bool { func rewriteValue386_OpGreater8(v *Value) bool { b := v.Block _ = b - // match: (Greater8 x y) + // match: (Greater8 x y) // cond: // result: (SETG (CMPB x y)) for { @@ -14483,7 +10326,7 @@ func rewriteValue386_OpGreater8(v *Value) bool { func rewriteValue386_OpGreater8U(v *Value) bool { b := v.Block _ = b - // match: (Greater8U x y) + // match: (Greater8U x y) // cond: // result: (SETA (CMPB x y)) for { @@ -14498,7 +10341,7 @@ func rewriteValue386_OpGreater8U(v *Value) bool { } } func rewriteValue386_OpHmul32(v *Value) bool { - // match: (Hmul32 x y) + // match: (Hmul32 x y) // cond: // result: (HMULL x y) for { @@ -14591,7 +10434,7 @@ func rewriteValue386_OpIsSliceInBounds(v *Value) bool { func rewriteValue386_OpLeq16(v *Value) bool { b := v.Block _ = b - // match: (Leq16 x y) + // match: (Leq16 x y) // cond: // result: (SETLE (CMPW x y)) for { @@ -14625,7 +10468,7 @@ func rewriteValue386_OpLeq16U(v *Value) bool { func rewriteValue386_OpLeq32(v *Value) bool { b := v.Block _ = b - // match: (Leq32 x y) + // match: (Leq32 x y) // cond: // result: (SETLE (CMPL x y)) for { @@ -14693,7 +10536,7 @@ func rewriteValue386_OpLeq64F(v *Value) bool { func rewriteValue386_OpLeq8(v *Value) bool { b := v.Block _ = b - // match: (Leq8 x y) + // match: (Leq8 x y) // cond: // result: (SETLE (CMPB x y)) for { @@ -14710,7 +10553,7 @@ func rewriteValue386_OpLeq8(v *Value) bool { func rewriteValue386_OpLeq8U(v *Value) bool { b := v.Block _ = b - // match: (Leq8U x y) + // match: (Leq8U x y) // cond: // result: (SETBE (CMPB x y)) for { @@ -14727,7 +10570,7 @@ func rewriteValue386_OpLeq8U(v *Value) bool { func rewriteValue386_OpLess16(v *Value) bool { b := v.Block _ = b - // match: (Less16 x y) + // match: (Less16 x y) // cond: // result: (SETL (CMPW x y)) for { @@ -14761,7 +10604,7 @@ func rewriteValue386_OpLess16U(v *Value) bool { func rewriteValue386_OpLess32(v *Value) bool { b := v.Block _ = b - // match: (Less32 x y) + // match: (Less32 x y) // cond: // result: (SETL (CMPL x y)) for { @@ -14829,7 +10672,7 @@ func rewriteValue386_OpLess64F(v *Value) bool { func rewriteValue386_OpLess8(v *Value) bool { b := v.Block _ = b - // match: (Less8 x y) + // match: (Less8 x y) // cond: // result: (SETL (CMPB x y)) for { @@ -14846,7 +10689,7 @@ func rewriteValue386_OpLess8(v *Value) bool { func rewriteValue386_OpLess8U(v *Value) bool { b := v.Block _ = b - // match: (Less8U x y) + // match: (Less8U x y) // cond: // result: (SETB (CMPB x y)) for { @@ -15026,7 +10869,7 @@ func rewriteValue386_OpLsh16x64(v *Value) bool { func rewriteValue386_OpLsh16x8(v *Value) bool { b := v.Block _ = b - // match: (Lsh16x8 x y) + // match: (Lsh16x8 x y) // cond: // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { @@ -15135,7 +10978,7 @@ func rewriteValue386_OpLsh32x64(v *Value) bool { func rewriteValue386_OpLsh32x8(v *Value) bool { b := v.Block _ = b - // match: (Lsh32x8 x y) 
+ // match: (Lsh32x8 x y) // cond: // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { @@ -15244,7 +11087,7 @@ func rewriteValue386_OpLsh8x64(v *Value) bool { func rewriteValue386_OpLsh8x8(v *Value) bool { b := v.Block _ = b - // match: (Lsh8x8 x y) + // match: (Lsh8x8 x y) // cond: // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { @@ -15266,7 +11109,7 @@ func rewriteValue386_OpLsh8x8(v *Value) bool { } } func rewriteValue386_OpMod16(v *Value) bool { - // match: (Mod16 x y) + // match: (Mod16 x y) // cond: // result: (MODW x y) for { @@ -15292,7 +11135,7 @@ func rewriteValue386_OpMod16u(v *Value) bool { } } func rewriteValue386_OpMod32(v *Value) bool { - // match: (Mod32 x y) + // match: (Mod32 x y) // cond: // result: (MODL x y) for { @@ -15322,7 +11165,7 @@ func rewriteValue386_OpMod8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Mod8 x y) + // match: (Mod8 x y) // cond: // result: (MODW (SignExt8to16 x) (SignExt8to16 y)) for { @@ -15343,7 +11186,7 @@ func rewriteValue386_OpMod8u(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Mod8u x y) + // match: (Mod8u x y) // cond: // result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y)) for { @@ -15648,7 +11491,7 @@ func rewriteValue386_OpMove(v *Value) bool { return false } func rewriteValue386_OpMul16(v *Value) bool { - // match: (Mul16 x y) + // match: (Mul16 x y) // cond: // result: (MULL x y) for { @@ -15661,7 +11504,7 @@ func rewriteValue386_OpMul16(v *Value) bool { } } func rewriteValue386_OpMul32(v *Value) bool { - // match: (Mul32 x y) + // match: (Mul32 x y) // cond: // result: (MULL x y) for { @@ -15713,7 +11556,7 @@ func rewriteValue386_OpMul64F(v *Value) bool { } } func rewriteValue386_OpMul8(v *Value) bool { - // match: (Mul8 x y) + // match: (Mul8 x y) // cond: // result: (MULL x y) for { @@ -15726,7 +11569,7 @@ func rewriteValue386_OpMul8(v *Value) bool { } } func rewriteValue386_OpNeg16(v *Value) bool { - // match: (Neg16 x) + // match: (Neg16 x) // cond: // result: (NEGL x) for { @@ -15737,7 +11580,7 @@ func rewriteValue386_OpNeg16(v *Value) bool { } } func rewriteValue386_OpNeg32(v *Value) bool { - // match: (Neg32 x) + // match: (Neg32 x) // cond: // result: (NEGL x) for { @@ -15820,7 +11663,7 @@ func rewriteValue386_OpNeg64F(v *Value) bool { return false } func rewriteValue386_OpNeg8(v *Value) bool { - // match: (Neg8 x) + // match: (Neg8 x) // cond: // result: (NEGL x) for { @@ -15833,7 +11676,7 @@ func rewriteValue386_OpNeg8(v *Value) bool { func rewriteValue386_OpNeq16(v *Value) bool { b := v.Block _ = b - // match: (Neq16 x y) + // match: (Neq16 x y) // cond: // result: (SETNE (CMPW x y)) for { @@ -15850,7 +11693,7 @@ func rewriteValue386_OpNeq16(v *Value) bool { func rewriteValue386_OpNeq32(v *Value) bool { b := v.Block _ = b - // match: (Neq32 x y) + // match: (Neq32 x y) // cond: // result: (SETNE (CMPL x y)) for { @@ -15901,7 +11744,7 @@ func rewriteValue386_OpNeq64F(v *Value) bool { func rewriteValue386_OpNeq8(v *Value) bool { b := v.Block _ = b - // match: (Neq8 x y) + // match: (Neq8 x y) // cond: // result: (SETNE (CMPB x y)) for { @@ -15918,7 +11761,7 @@ func rewriteValue386_OpNeq8(v *Value) bool { func rewriteValue386_OpNeqB(v *Value) bool { b := v.Block _ = b - // match: (NeqB x y) + // match: (NeqB x y) // cond: // result: (SETNE (CMPB x y)) for { @@ -16014,7 +11857,7 @@ func rewriteValue386_OpOr32(v *Value) bool { } } func rewriteValue386_OpOr8(v *Value) bool { - // match: (Or8 x y) + // match: (Or8 x y) // cond: // 
result: (ORL x y) for { @@ -16151,7 +11994,7 @@ func rewriteValue386_OpRsh16Ux64(v *Value) bool { func rewriteValue386_OpRsh16Ux8(v *Value) bool { b := v.Block _ = b - // match: (Rsh16Ux8 x y) + // match: (Rsh16Ux8 x y) // cond: // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPBconst y [16]))) for { @@ -16268,7 +12111,7 @@ func rewriteValue386_OpRsh16x64(v *Value) bool { func rewriteValue386_OpRsh16x8(v *Value) bool { b := v.Block _ = b - // match: (Rsh16x8 x y) + // match: (Rsh16x8 x y) // cond: // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [16]))))) for { @@ -16380,7 +12223,7 @@ func rewriteValue386_OpRsh32Ux64(v *Value) bool { func rewriteValue386_OpRsh32Ux8(v *Value) bool { b := v.Block _ = b - // match: (Rsh32Ux8 x y) + // match: (Rsh32Ux8 x y) // cond: // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPBconst y [32]))) for { @@ -16497,7 +12340,7 @@ func rewriteValue386_OpRsh32x64(v *Value) bool { func rewriteValue386_OpRsh32x8(v *Value) bool { b := v.Block _ = b - // match: (Rsh32x8 x y) + // match: (Rsh32x8 x y) // cond: // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [32]))))) for { @@ -16609,7 +12452,7 @@ func rewriteValue386_OpRsh8Ux64(v *Value) bool { func rewriteValue386_OpRsh8Ux8(v *Value) bool { b := v.Block _ = b - // match: (Rsh8Ux8 x y) + // match: (Rsh8Ux8 x y) // cond: // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPBconst y [8]))) for { @@ -16726,7 +12569,7 @@ func rewriteValue386_OpRsh8x64(v *Value) bool { func rewriteValue386_OpRsh8x8(v *Value) bool { b := v.Block _ = b - // match: (Rsh8x8 x y) + // match: (Rsh8x8 x y) // cond: // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [8]))))) for { @@ -16762,7 +12605,7 @@ func rewriteValue386_OpSignExt16to32(v *Value) bool { } } func rewriteValue386_OpSignExt8to16(v *Value) bool { - // match: (SignExt8to16 x) + // match: (SignExt8to16 x) // cond: // result: (MOVBLSX x) for { @@ -16773,7 +12616,7 @@ func rewriteValue386_OpSignExt8to16(v *Value) bool { } } func rewriteValue386_OpSignExt8to32(v *Value) bool { - // match: (SignExt8to32 x) + // match: (SignExt8to32 x) // cond: // result: (MOVBLSX x) for { @@ -16927,7 +12770,7 @@ func rewriteValue386_OpStore(v *Value) bool { return false } func rewriteValue386_OpSub16(v *Value) bool { - // match: (Sub16 x y) + // match: (Sub16 x y) // cond: // result: (SUBL x y) for { @@ -16940,7 +12783,7 @@ func rewriteValue386_OpSub16(v *Value) bool { } } func rewriteValue386_OpSub32(v *Value) bool { - // match: (Sub32 x y) + // match: (Sub32 x y) // cond: // result: (SUBL x y) for { @@ -17007,7 +12850,7 @@ func rewriteValue386_OpSub64F(v *Value) bool { } } func rewriteValue386_OpSub8(v *Value) bool { - // match: (Sub8 x y) + // match: (Sub8 x y) // cond: // result: (SUBL x y) for { @@ -17033,7 +12876,7 @@ func rewriteValue386_OpSubPtr(v *Value) bool { } } func rewriteValue386_OpTrunc16to8(v *Value) bool { - // match: (Trunc16to8 x) + // match: (Trunc16to8 x) // cond: // result: x for { @@ -17057,7 +12900,7 @@ func rewriteValue386_OpTrunc32to16(v *Value) bool { } } func rewriteValue386_OpTrunc32to8(v *Value) bool { - // match: (Trunc32to8 x) + // match: (Trunc32to8 x) // cond: // result: x for { @@ -17095,7 +12938,7 @@ func rewriteValue386_OpXor32(v *Value) bool { } } func rewriteValue386_OpXor8(v *Value) bool { - // match: (Xor8 x y) + // match: (Xor8 x y) // cond: // result: (XORL x y) for { @@ -17394,7 +13237,7 @@ func rewriteValue386_OpZeroExt16to32(v *Value) bool { } } func rewriteValue386_OpZeroExt8to16(v *Value) bool { - // match: (ZeroExt8to16 x) + // match: 
(ZeroExt8to16 x) // cond: // result: (MOVBLZX x) for { @@ -17405,7 +13248,7 @@ func rewriteValue386_OpZeroExt8to16(v *Value) bool { } } func rewriteValue386_OpZeroExt8to32(v *Value) bool { - // match: (ZeroExt8to32 x) + // match: (ZeroExt8to32 x) // cond: // result: (MOVBLZX x) for { @@ -17747,7 +13590,7 @@ func rewriteBlock386(b *Block) bool { return true } case BlockIf: - // match: (If (SETL cmp) yes no) + // match: (If (SETL cmp) yes no) // cond: // result: (LT cmp yes no) for { @@ -17781,7 +13624,7 @@ func rewriteBlock386(b *Block) bool { _ = no return true } - // match: (If (SETG cmp) yes no) + // match: (If (SETG cmp) yes no) // cond: // result: (GT cmp yes no) for { @@ -17849,7 +13692,7 @@ func rewriteBlock386(b *Block) bool { _ = no return true } - // match: (If (SETB cmp) yes no) + // match: (If (SETB cmp) yes no) // cond: // result: (ULT cmp yes no) for { @@ -17877,713 +13720,349 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386ULE - b.SetControl(cmp) - _ = yes - _ = no - return true - } - // match: (If (SETA cmp) yes no) - // cond: - // result: (UGT cmp yes no) - for { - v := b.Control - if v.Op != Op386SETA { - break - } - cmp := v.Args[0] - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = Block386UGT - b.SetControl(cmp) - _ = yes - _ = no - return true - } - // match: (If (SETAE cmp) yes no) - // cond: - // result: (UGE cmp yes no) - for { - v := b.Control - if v.Op != Op386SETAE { - break - } - cmp := v.Args[0] - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = Block386UGE - b.SetControl(cmp) - _ = yes - _ = no - return true - } - // match: (If (SETGF cmp) yes no) - // cond: - // result: (UGT cmp yes no) - for { - v := b.Control - if v.Op != Op386SETGF { - break - } - cmp := v.Args[0] - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = Block386UGT - b.SetControl(cmp) - _ = yes - _ = no - return true - } - // match: (If (SETGEF cmp) yes no) - // cond: - // result: (UGE cmp yes no) - for { - v := b.Control - if v.Op != Op386SETGEF { - break - } - cmp := v.Args[0] - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = Block386UGE - b.SetControl(cmp) - _ = yes - _ = no - return true - } - // match: (If (SETEQF cmp) yes no) - // cond: - // result: (EQF cmp yes no) - for { - v := b.Control - if v.Op != Op386SETEQF { - break - } - cmp := v.Args[0] - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = Block386EQF - b.SetControl(cmp) - _ = yes - _ = no - return true - } - // match: (If (SETNEF cmp) yes no) - // cond: - // result: (NEF cmp yes no) - for { - v := b.Control - if v.Op != Op386SETNEF { - break - } - cmp := v.Args[0] - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = Block386NEF - b.SetControl(cmp) - _ = yes - _ = no - return true - } - // match: (If cond yes no) - // cond: - // result: (NE (TESTB cond cond) yes no) - for { - v := b.Control - _ = v - cond := b.Control - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = Block386NE - v0 := b.NewValue0(v.Pos, Op386TESTB, TypeFlags) - v0.AddArg(cond) - v0.AddArg(cond) - b.SetControl(v0) - _ = yes - _ = no - return true - } - case Block386LE: - // match: (LE (InvertFlags cmp) yes no) - // cond: - // result: (GE cmp yes no) - for { - v := b.Control - if v.Op != Op386InvertFlags { - break - } - cmp := v.Args[0] - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = Block386GE - b.SetControl(cmp) - _ = yes - _ = no - return true - } - // match: (LE (FlagEQ) yes no) - // cond: - // result: (First nil yes no) - for { - v := b.Control - if v.Op != Op386FlagEQ { - break - } - yes := b.Succs[0] 
- no := b.Succs[1] - b.Kind = BlockFirst - b.SetControl(nil) - _ = yes - _ = no - return true - } - // match: (LE (FlagLT_ULT) yes no) - // cond: - // result: (First nil yes no) - for { - v := b.Control - if v.Op != Op386FlagLT_ULT { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = BlockFirst - b.SetControl(nil) - _ = yes - _ = no - return true - } - // match: (LE (FlagLT_UGT) yes no) - // cond: - // result: (First nil yes no) - for { - v := b.Control - if v.Op != Op386FlagLT_UGT { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = BlockFirst - b.SetControl(nil) - _ = yes - _ = no - return true - } - // match: (LE (FlagGT_ULT) yes no) - // cond: - // result: (First nil no yes) - for { - v := b.Control - if v.Op != Op386FlagGT_ULT { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = BlockFirst - b.SetControl(nil) - b.swapSuccessors() - _ = no - _ = yes - return true - } - // match: (LE (FlagGT_UGT) yes no) - // cond: - // result: (First nil no yes) - for { - v := b.Control - if v.Op != Op386FlagGT_UGT { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = BlockFirst - b.SetControl(nil) - b.swapSuccessors() - _ = no + b.Kind = Block386ULE + b.SetControl(cmp) _ = yes + _ = no return true } - case Block386LT: - // match: (LT (InvertFlags cmp) yes no) + // match: (If (SETA cmp) yes no) // cond: - // result: (GT cmp yes no) + // result: (UGT cmp yes no) for { v := b.Control - if v.Op != Op386InvertFlags { + if v.Op != Op386SETA { break } cmp := v.Args[0] yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386GT + b.Kind = Block386UGT b.SetControl(cmp) _ = yes _ = no return true } - // match: (LT (FlagEQ) yes no) + // match: (If (SETAE cmp) yes no) // cond: - // result: (First nil no yes) + // result: (UGE cmp yes no) for { v := b.Control - if v.Op != Op386FlagEQ { + if v.Op != Op386SETAE { break } + cmp := v.Args[0] yes := b.Succs[0] no := b.Succs[1] - b.Kind = BlockFirst - b.SetControl(nil) - b.swapSuccessors() - _ = no + b.Kind = Block386UGE + b.SetControl(cmp) _ = yes + _ = no return true } - // match: (LT (FlagLT_ULT) yes no) + // match: (If (SETGF cmp) yes no) // cond: - // result: (First nil yes no) + // result: (UGT cmp yes no) for { v := b.Control - if v.Op != Op386FlagLT_ULT { + if v.Op != Op386SETGF { break } + cmp := v.Args[0] yes := b.Succs[0] no := b.Succs[1] - b.Kind = BlockFirst - b.SetControl(nil) + b.Kind = Block386UGT + b.SetControl(cmp) _ = yes _ = no return true } - // match: (LT (FlagLT_UGT) yes no) + // match: (If (SETGEF cmp) yes no) // cond: - // result: (First nil yes no) + // result: (UGE cmp yes no) for { v := b.Control - if v.Op != Op386FlagLT_UGT { + if v.Op != Op386SETGEF { break } + cmp := v.Args[0] yes := b.Succs[0] no := b.Succs[1] - b.Kind = BlockFirst - b.SetControl(nil) + b.Kind = Block386UGE + b.SetControl(cmp) _ = yes _ = no return true } - // match: (LT (FlagGT_ULT) yes no) + // match: (If (SETEQF cmp) yes no) // cond: - // result: (First nil no yes) + // result: (EQF cmp yes no) for { v := b.Control - if v.Op != Op386FlagGT_ULT { + if v.Op != Op386SETEQF { break } + cmp := v.Args[0] yes := b.Succs[0] no := b.Succs[1] - b.Kind = BlockFirst - b.SetControl(nil) - b.swapSuccessors() - _ = no + b.Kind = Block386EQF + b.SetControl(cmp) _ = yes + _ = no return true } - // match: (LT (FlagGT_UGT) yes no) + // match: (If (SETNEF cmp) yes no) // cond: - // result: (First nil no yes) + // result: (NEF cmp yes no) for { v := b.Control - if v.Op != Op386FlagGT_UGT { + if v.Op != Op386SETNEF { break } + cmp := v.Args[0] yes := 
b.Succs[0] no := b.Succs[1] - b.Kind = BlockFirst - b.SetControl(nil) - b.swapSuccessors() - _ = no + b.Kind = Block386NEF + b.SetControl(cmp) _ = yes + _ = no return true } - case Block386NE: - // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) + // match: (If cond yes no) // cond: - // result: (LT cmp yes no) + // result: (NE (TESTB cond cond) yes no) for { v := b.Control - if v.Op != Op386TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != Op386SETL { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SETL { - break - } - if cmp != v_1.Args[0] { - break - } + _ = v + cond := b.Control yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386LT - b.SetControl(cmp) + b.Kind = Block386NE + v0 := b.NewValue0(v.Pos, Op386TESTB, TypeFlags) + v0.AddArg(cond) + v0.AddArg(cond) + b.SetControl(v0) _ = yes _ = no return true } - // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) + case Block386LE: + // match: (LE (InvertFlags cmp) yes no) // cond: - // result: (LT cmp yes no) + // result: (GE cmp yes no) for { v := b.Control - if v.Op != Op386TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != Op386SETL { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SETL { - break - } - if cmp != v_1.Args[0] { + if v.Op != Op386InvertFlags { break } + cmp := v.Args[0] yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386LT + b.Kind = Block386GE b.SetControl(cmp) _ = yes _ = no return true } - // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) + // match: (LE (FlagEQ) yes no) // cond: - // result: (LE cmp yes no) + // result: (First nil yes no) for { v := b.Control - if v.Op != Op386TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != Op386SETLE { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SETLE { - break - } - if cmp != v_1.Args[0] { + if v.Op != Op386FlagEQ { break } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386LE - b.SetControl(cmp) + b.Kind = BlockFirst + b.SetControl(nil) _ = yes _ = no return true } - // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) + // match: (LE (FlagLT_ULT) yes no) // cond: - // result: (LE cmp yes no) + // result: (First nil yes no) for { v := b.Control - if v.Op != Op386TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != Op386SETLE { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SETLE { - break - } - if cmp != v_1.Args[0] { + if v.Op != Op386FlagLT_ULT { break } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386LE - b.SetControl(cmp) + b.Kind = BlockFirst + b.SetControl(nil) _ = yes _ = no return true } - // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) + // match: (LE (FlagLT_UGT) yes no) // cond: - // result: (GT cmp yes no) + // result: (First nil yes no) for { v := b.Control - if v.Op != Op386TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != Op386SETG { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SETG { - break - } - if cmp != v_1.Args[0] { + if v.Op != Op386FlagLT_UGT { break } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386GT - b.SetControl(cmp) + b.Kind = BlockFirst + b.SetControl(nil) _ = yes _ = no return true } - // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) + // match: (LE (FlagGT_ULT) yes no) // cond: - // result: (GT cmp yes no) + // result: (First nil no yes) for { v := b.Control - if v.Op != Op386TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != Op386SETG { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SETG { - break - } - if cmp != v_1.Args[0] { + if v.Op != 
Op386FlagGT_ULT { break } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386GT - b.SetControl(cmp) - _ = yes + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() _ = no + _ = yes return true } - // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) + // match: (LE (FlagGT_UGT) yes no) // cond: - // result: (GE cmp yes no) + // result: (First nil no yes) for { v := b.Control - if v.Op != Op386TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != Op386SETGE { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SETGE { - break - } - if cmp != v_1.Args[0] { + if v.Op != Op386FlagGT_UGT { break } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386GE - b.SetControl(cmp) - _ = yes + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() _ = no + _ = yes return true } - // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) + case Block386LT: + // match: (LT (InvertFlags cmp) yes no) // cond: - // result: (GE cmp yes no) + // result: (GT cmp yes no) for { v := b.Control - if v.Op != Op386TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != Op386SETGE { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SETGE { - break - } - if cmp != v_1.Args[0] { + if v.Op != Op386InvertFlags { break } + cmp := v.Args[0] yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386GE + b.Kind = Block386GT b.SetControl(cmp) _ = yes _ = no return true } - // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) + // match: (LT (FlagEQ) yes no) // cond: - // result: (EQ cmp yes no) + // result: (First nil no yes) for { v := b.Control - if v.Op != Op386TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != Op386SETEQ { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SETEQ { - break - } - if cmp != v_1.Args[0] { + if v.Op != Op386FlagEQ { break } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386EQ - b.SetControl(cmp) - _ = yes + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() _ = no + _ = yes return true } - // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) + // match: (LT (FlagLT_ULT) yes no) // cond: - // result: (EQ cmp yes no) + // result: (First nil yes no) for { v := b.Control - if v.Op != Op386TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != Op386SETEQ { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SETEQ { - break - } - if cmp != v_1.Args[0] { + if v.Op != Op386FlagLT_ULT { break } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386EQ - b.SetControl(cmp) + b.Kind = BlockFirst + b.SetControl(nil) _ = yes _ = no return true } - // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) + // match: (LT (FlagLT_UGT) yes no) // cond: - // result: (NE cmp yes no) + // result: (First nil yes no) for { v := b.Control - if v.Op != Op386TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != Op386SETNE { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SETNE { - break - } - if cmp != v_1.Args[0] { + if v.Op != Op386FlagLT_UGT { break } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386NE - b.SetControl(cmp) + b.Kind = BlockFirst + b.SetControl(nil) _ = yes _ = no return true } - // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) + // match: (LT (FlagGT_ULT) yes no) // cond: - // result: (NE cmp yes no) + // result: (First nil no yes) for { v := b.Control - if v.Op != Op386TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != Op386SETNE { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SETNE { - break - } - if cmp != v_1.Args[0] { + if v.Op != 
Op386FlagGT_ULT { break } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386NE - b.SetControl(cmp) - _ = yes + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() _ = no + _ = yes return true } - // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) + // match: (LT (FlagGT_UGT) yes no) // cond: - // result: (ULT cmp yes no) + // result: (First nil no yes) for { v := b.Control - if v.Op != Op386TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != Op386SETB { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SETB { - break - } - if cmp != v_1.Args[0] { + if v.Op != Op386FlagGT_UGT { break } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386ULT - b.SetControl(cmp) - _ = yes + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() _ = no + _ = yes return true } - // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) + case Block386NE: + // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) // cond: - // result: (ULT cmp yes no) + // result: (LT cmp yes no) for { v := b.Control if v.Op != Op386TESTB { break } v_0 := v.Args[0] - if v_0.Op != Op386SETB { + if v_0.Op != Op386SETL { break } cmp := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386SETB { + if v_1.Op != Op386SETL { break } if cmp != v_1.Args[0] { @@ -18591,27 +14070,27 @@ func rewriteBlock386(b *Block) bool { } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386ULT + b.Kind = Block386LT b.SetControl(cmp) _ = yes _ = no return true } - // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) + // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) // cond: - // result: (ULE cmp yes no) + // result: (LE cmp yes no) for { v := b.Control if v.Op != Op386TESTB { break } v_0 := v.Args[0] - if v_0.Op != Op386SETBE { + if v_0.Op != Op386SETLE { break } cmp := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386SETBE { + if v_1.Op != Op386SETLE { break } if cmp != v_1.Args[0] { @@ -18619,27 +14098,27 @@ func rewriteBlock386(b *Block) bool { } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386ULE + b.Kind = Block386LE b.SetControl(cmp) _ = yes _ = no return true } - // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) + // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) // cond: - // result: (ULE cmp yes no) + // result: (GT cmp yes no) for { v := b.Control if v.Op != Op386TESTB { break } v_0 := v.Args[0] - if v_0.Op != Op386SETBE { + if v_0.Op != Op386SETG { break } cmp := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386SETBE { + if v_1.Op != Op386SETG { break } if cmp != v_1.Args[0] { @@ -18647,27 +14126,27 @@ func rewriteBlock386(b *Block) bool { } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386ULE + b.Kind = Block386GT b.SetControl(cmp) _ = yes _ = no return true } - // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) + // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) // cond: - // result: (UGT cmp yes no) + // result: (GE cmp yes no) for { v := b.Control if v.Op != Op386TESTB { break } v_0 := v.Args[0] - if v_0.Op != Op386SETA { + if v_0.Op != Op386SETGE { break } cmp := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386SETA { + if v_1.Op != Op386SETGE { break } if cmp != v_1.Args[0] { @@ -18675,27 +14154,27 @@ func rewriteBlock386(b *Block) bool { } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386UGT + b.Kind = Block386GE b.SetControl(cmp) _ = yes _ = no return true } - // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) + // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) // cond: - // result: (UGT cmp yes no) + // result: (EQ cmp yes no) for { v := b.Control if v.Op != 
Op386TESTB { break } v_0 := v.Args[0] - if v_0.Op != Op386SETA { + if v_0.Op != Op386SETEQ { break } cmp := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386SETA { + if v_1.Op != Op386SETEQ { break } if cmp != v_1.Args[0] { @@ -18703,27 +14182,27 @@ func rewriteBlock386(b *Block) bool { } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386UGT + b.Kind = Block386EQ b.SetControl(cmp) _ = yes _ = no return true } - // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) + // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) // cond: - // result: (UGE cmp yes no) + // result: (NE cmp yes no) for { v := b.Control if v.Op != Op386TESTB { break } v_0 := v.Args[0] - if v_0.Op != Op386SETAE { + if v_0.Op != Op386SETNE { break } cmp := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386SETAE { + if v_1.Op != Op386SETNE { break } if cmp != v_1.Args[0] { @@ -18731,27 +14210,27 @@ func rewriteBlock386(b *Block) bool { } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386UGE + b.Kind = Block386NE b.SetControl(cmp) _ = yes _ = no return true } - // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) + // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) // cond: - // result: (UGE cmp yes no) + // result: (ULT cmp yes no) for { v := b.Control if v.Op != Op386TESTB { break } v_0 := v.Args[0] - if v_0.Op != Op386SETAE { + if v_0.Op != Op386SETB { break } cmp := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386SETAE { + if v_1.Op != Op386SETB { break } if cmp != v_1.Args[0] { @@ -18759,27 +14238,27 @@ func rewriteBlock386(b *Block) bool { } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386UGE + b.Kind = Block386ULT b.SetControl(cmp) _ = yes _ = no return true } - // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) + // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) // cond: - // result: (UGT cmp yes no) + // result: (ULE cmp yes no) for { v := b.Control if v.Op != Op386TESTB { break } v_0 := v.Args[0] - if v_0.Op != Op386SETGF { + if v_0.Op != Op386SETBE { break } cmp := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386SETGF { + if v_1.Op != Op386SETBE { break } if cmp != v_1.Args[0] { @@ -18787,27 +14266,27 @@ func rewriteBlock386(b *Block) bool { } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386UGT + b.Kind = Block386ULE b.SetControl(cmp) _ = yes _ = no return true } - // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) + // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) // cond: - // result: (UGT cmp yes no) + // result: (UGT cmp yes no) for { v := b.Control if v.Op != Op386TESTB { break } v_0 := v.Args[0] - if v_0.Op != Op386SETGF { + if v_0.Op != Op386SETA { break } cmp := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386SETGF { + if v_1.Op != Op386SETA { break } if cmp != v_1.Args[0] { @@ -18821,21 +14300,21 @@ func rewriteBlock386(b *Block) bool { _ = no return true } - // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) + // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) // cond: - // result: (UGE cmp yes no) + // result: (UGE cmp yes no) for { v := b.Control if v.Op != Op386TESTB { break } v_0 := v.Args[0] - if v_0.Op != Op386SETGEF { + if v_0.Op != Op386SETAE { break } cmp := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386SETGEF { + if v_1.Op != Op386SETAE { break } if cmp != v_1.Args[0] { @@ -18849,21 +14328,21 @@ func rewriteBlock386(b *Block) bool { _ = no return true } - // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) + // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) // cond: - // result: (UGE cmp yes no) + // result: (UGT cmp yes no) for { v := 
b.Control if v.Op != Op386TESTB { break } v_0 := v.Args[0] - if v_0.Op != Op386SETGEF { + if v_0.Op != Op386SETGF { break } cmp := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386SETGEF { + if v_1.Op != Op386SETGF { break } if cmp != v_1.Args[0] { @@ -18871,27 +14350,27 @@ func rewriteBlock386(b *Block) bool { } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386UGE + b.Kind = Block386UGT b.SetControl(cmp) _ = yes _ = no return true } - // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) + // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) // cond: - // result: (EQF cmp yes no) + // result: (UGE cmp yes no) for { v := b.Control if v.Op != Op386TESTB { break } v_0 := v.Args[0] - if v_0.Op != Op386SETEQF { + if v_0.Op != Op386SETGEF { break } cmp := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != Op386SETEQF { + if v_1.Op != Op386SETGEF { break } if cmp != v_1.Args[0] { @@ -18899,7 +14378,7 @@ func rewriteBlock386(b *Block) bool { } yes := b.Succs[0] no := b.Succs[1] - b.Kind = Block386EQF + b.Kind = Block386UGE b.SetControl(cmp) _ = yes _ = no @@ -18961,34 +14440,6 @@ func rewriteBlock386(b *Block) bool { _ = no return true } - // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) - // cond: - // result: (NEF cmp yes no) - for { - v := b.Control - if v.Op != Op386TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != Op386SETNEF { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != Op386SETNEF { - break - } - if cmp != v_1.Args[0] { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = Block386NEF - b.SetControl(cmp) - _ = yes - _ = no - return true - } // match: (NE (InvertFlags cmp) yes no) // cond: // result: (NE cmp yes no) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index fe4b7a0c00..e31d3b453a 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -858,9 +858,9 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { v.AddArg(x) return true } - // match: (ADDL (SHLLconst x [c]) (SHRLconst x [d])) - // cond: d==32-c - // result: (ROLLconst x [c]) + // match: (ADDL (SHLLconst x [c]) (SHRLconst x [32-c])) + // cond: + // result: (ROLLconst x [ c]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64SHLLconst { @@ -872,11 +872,10 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { if v_1.Op != OpAMD64SHRLconst { break } - d := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 32-c { break } - if !(d == 32-c) { + if x != v_1.Args[0] { break } v.reset(OpAMD64ROLLconst) @@ -884,35 +883,34 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { v.AddArg(x) return true } - // match: (ADDL (SHRLconst x [d]) (SHLLconst x [c])) - // cond: d==32-c - // result: (ROLLconst x [c]) + // match: (ADDL (SHRLconst x [c]) (SHLLconst x [32-c])) + // cond: + // result: (ROLLconst x [32-c]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64SHRLconst { break } - d := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] v_1 := v.Args[1] if v_1.Op != OpAMD64SHLLconst { break } - c := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 32-c { break } - if !(d == 32-c) { + if x != v_1.Args[0] { break } v.reset(OpAMD64ROLLconst) - v.AuxInt = c + v.AuxInt = 32 - c v.AddArg(x) return true } - // match: (ADDL (SHLLconst x [c]) (SHRWconst x [d])) - // cond: d==16-c && c < 16 && t.Size() == 2 - // result: (ROLWconst x [c]) + // match: (ADDL (SHLLconst x [c]) (SHRWconst x [16-c])) + // cond: c < 16 && t.Size() == 2 + // result: (ROLWconst x [ c]) for { t := v.Type v_0 := v.Args[0] @@ 
-925,11 +923,13 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { if v_1.Op != OpAMD64SHRWconst { break } - d := v_1.AuxInt + if v_1.AuxInt != 16-c { + break + } if x != v_1.Args[0] { break } - if !(d == 16-c && c < 16 && t.Size() == 2) { + if !(c < 16 && t.Size() == 2) { break } v.reset(OpAMD64ROLWconst) @@ -937,36 +937,38 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { v.AddArg(x) return true } - // match: (ADDL (SHRWconst x [d]) (SHLLconst x [c])) - // cond: d==16-c && c < 16 && t.Size() == 2 - // result: (ROLWconst x [c]) + // match: (ADDL (SHRWconst x [c]) (SHLLconst x [16-c])) + // cond: c > 0 && t.Size() == 2 + // result: (ROLWconst x [16-c]) for { t := v.Type v_0 := v.Args[0] if v_0.Op != OpAMD64SHRWconst { break } - d := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] v_1 := v.Args[1] if v_1.Op != OpAMD64SHLLconst { break } - c := v_1.AuxInt + if v_1.AuxInt != 16-c { + break + } if x != v_1.Args[0] { break } - if !(d == 16-c && c < 16 && t.Size() == 2) { + if !(c > 0 && t.Size() == 2) { break } v.reset(OpAMD64ROLWconst) - v.AuxInt = c + v.AuxInt = 16 - c v.AddArg(x) return true } - // match: (ADDL (SHLLconst x [c]) (SHRBconst x [d])) - // cond: d==8-c && c < 8 && t.Size() == 1 - // result: (ROLBconst x [c]) + // match: (ADDL (SHLLconst x [c]) (SHRBconst x [ 8-c])) + // cond: c < 8 && t.Size() == 1 + // result: (ROLBconst x [ c]) for { t := v.Type v_0 := v.Args[0] @@ -979,11 +981,13 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { if v_1.Op != OpAMD64SHRBconst { break } - d := v_1.AuxInt + if v_1.AuxInt != 8-c { + break + } if x != v_1.Args[0] { break } - if !(d == 8-c && c < 8 && t.Size() == 1) { + if !(c < 8 && t.Size() == 1) { break } v.reset(OpAMD64ROLBconst) @@ -991,30 +995,32 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { v.AddArg(x) return true } - // match: (ADDL (SHRBconst x [d]) (SHLLconst x [c])) - // cond: d==8-c && c < 8 && t.Size() == 1 - // result: (ROLBconst x [c]) + // match: (ADDL (SHRBconst x [c]) (SHLLconst x [ 8-c])) + // cond: c > 0 && t.Size() == 1 + // result: (ROLBconst x [ 8-c]) for { t := v.Type v_0 := v.Args[0] if v_0.Op != OpAMD64SHRBconst { break } - d := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] v_1 := v.Args[1] if v_1.Op != OpAMD64SHLLconst { break } - c := v_1.AuxInt + if v_1.AuxInt != 8-c { + break + } if x != v_1.Args[0] { break } - if !(d == 8-c && c < 8 && t.Size() == 1) { + if !(c > 0 && t.Size() == 1) { break } v.reset(OpAMD64ROLBconst) - v.AuxInt = c + v.AuxInt = 8 - c v.AddArg(x) return true } @@ -1033,21 +1039,6 @@ func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool { v.AddArg(y) return true } - // match: (ADDL (NEGL y) x) - // cond: - // result: (SUBL x y) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64NEGL { - break - } - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpAMD64SUBL) - v.AddArg(x) - v.AddArg(y) - return true - } // match: (ADDL x l:(MOVLload [off] {sym} ptr mem)) // cond: canMergeLoad(v, l, x) && clobber(l) // result: (ADDLmem x [off] {sym} ptr mem) @@ -1203,9 +1194,9 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { v.AddArg(x) return true } - // match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d])) - // cond: d==64-c - // result: (ROLQconst x [c]) + // match: (ADDQ (SHLQconst x [c]) (SHRQconst x [64-c])) + // cond: + // result: (ROLQconst x [ c]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64SHLQconst { @@ -1217,11 +1208,10 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { if v_1.Op != OpAMD64SHRQconst { break } - d := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 64-c { break } - if !(d 
== 64-c) { + if x != v_1.Args[0] { break } v.reset(OpAMD64ROLQconst) @@ -1229,29 +1219,28 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { v.AddArg(x) return true } - // match: (ADDQ (SHRQconst x [d]) (SHLQconst x [c])) - // cond: d==64-c - // result: (ROLQconst x [c]) + // match: (ADDQ (SHRQconst x [c]) (SHLQconst x [64-c])) + // cond: + // result: (ROLQconst x [64-c]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64SHRQconst { break } - d := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] v_1 := v.Args[1] if v_1.Op != OpAMD64SHLQconst { break } - c := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 64-c { break } - if !(d == 64-c) { + if x != v_1.Args[0] { break } v.reset(OpAMD64ROLQconst) - v.AuxInt = c + v.AuxInt = 64 - c v.AddArg(x) return true } @@ -1273,24 +1262,6 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { v.AddArg(y) return true } - // match: (ADDQ (SHLQconst [3] y) x) - // cond: - // result: (LEAQ8 x y) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst { - break - } - if v_0.AuxInt != 3 { - break - } - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpAMD64LEAQ8) - v.AddArg(x) - v.AddArg(y) - return true - } // match: (ADDQ x (SHLQconst [2] y)) // cond: // result: (LEAQ4 x y) @@ -1309,24 +1280,6 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { v.AddArg(y) return true } - // match: (ADDQ (SHLQconst [2] y) x) - // cond: - // result: (LEAQ4 x y) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst { - break - } - if v_0.AuxInt != 2 { - break - } - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpAMD64LEAQ4) - v.AddArg(x) - v.AddArg(y) - return true - } // match: (ADDQ x (SHLQconst [1] y)) // cond: // result: (LEAQ2 x y) @@ -1345,24 +1298,6 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { v.AddArg(y) return true } - // match: (ADDQ (SHLQconst [1] y) x) - // cond: - // result: (LEAQ2 x y) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst { - break - } - if v_0.AuxInt != 1 { - break - } - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpAMD64LEAQ2) - v.AddArg(x) - v.AddArg(y) - return true - } // match: (ADDQ x (ADDQ y y)) // cond: // result: (LEAQ2 x y) @@ -1381,24 +1316,6 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { v.AddArg(y) return true } - // match: (ADDQ (ADDQ y y) x) - // cond: - // result: (LEAQ2 x y) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - y := v_0.Args[0] - if y != v_0.Args[1] { - break - } - x := v.Args[1] - v.reset(OpAMD64LEAQ2) - v.AddArg(x) - v.AddArg(y) - return true - } // match: (ADDQ x (ADDQ x y)) // cond: // result: (LEAQ2 y x) @@ -1435,42 +1352,6 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { v.AddArg(x) return true } - // match: (ADDQ (ADDQ x y) x) - // cond: - // result: (LEAQ2 y x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - x := v_0.Args[0] - y := v_0.Args[1] - if x != v.Args[1] { - break - } - v.reset(OpAMD64LEAQ2) - v.AddArg(y) - v.AddArg(x) - return true - } - // match: (ADDQ (ADDQ y x) x) - // cond: - // result: (LEAQ2 y x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - y := v_0.Args[0] - x := v_0.Args[1] - if x != v.Args[1] { - break - } - v.reset(OpAMD64LEAQ2) - v.AddArg(y) - v.AddArg(x) - return true - } // match: (ADDQ (ADDQconst [c] x) y) // cond: // result: (LEAQ1 [c] x y) @@ -1488,17 +1369,17 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { v.AddArg(y) return true } - // match: (ADDQ y (ADDQconst [c] x)) + // match: (ADDQ x (ADDQconst [c] y)) // cond: // result: (LEAQ1 [c] x y) for { - y := v.Args[0] + x := 
v.Args[0] v_1 := v.Args[1] if v_1.Op != OpAMD64ADDQconst { break } c := v_1.AuxInt - x := v_1.Args[0] + y := v_1.Args[0] v.reset(OpAMD64LEAQ1) v.AuxInt = c v.AddArg(x) @@ -1527,7 +1408,7 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { v.AddArg(y) return true } - // match: (ADDQ (LEAQ [c] {s} y) x) + // match: (ADDQ (LEAQ [c] {s} x) y) // cond: x.Op != OpSB && y.Op != OpSB // result: (LEAQ1 [c] {s} x y) for { @@ -1537,8 +1418,8 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { } c := v_0.AuxInt s := v_0.Aux - y := v_0.Args[0] - x := v.Args[1] + x := v_0.Args[0] + y := v.Args[1] if !(x.Op != OpSB && y.Op != OpSB) { break } @@ -1564,21 +1445,6 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool { v.AddArg(y) return true } - // match: (ADDQ (NEGQ y) x) - // cond: - // result: (SUBQ x y) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64NEGQ { - break - } - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpAMD64SUBQ) - v.AddArg(x) - v.AddArg(y) - return true - } // match: (ADDQ x l:(MOVQload [off] {sym} ptr mem)) // cond: canMergeLoad(v, l, x) && clobber(l) // result: (ADDQmem x [off] {sym} ptr mem) @@ -3490,20 +3356,20 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { v.AddArg(y) return true } - // match: (LEAQ1 [c] {s} y (ADDQconst [d] x)) - // cond: is32Bit(c+d) && x.Op != OpSB + // match: (LEAQ1 [c] {s} x (ADDQconst [d] y)) + // cond: is32Bit(c+d) && y.Op != OpSB // result: (LEAQ1 [c+d] {s} x y) for { c := v.AuxInt s := v.Aux - y := v.Args[0] + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpAMD64ADDQconst { break } d := v_1.AuxInt - x := v_1.Args[0] - if !(is32Bit(c+d) && x.Op != OpSB) { + y := v_1.Args[0] + if !(is32Bit(c+d) && y.Op != OpSB) { break } v.reset(OpAMD64LEAQ1) @@ -3535,9 +3401,9 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { v.AddArg(y) return true } - // match: (LEAQ1 [c] {s} (SHLQconst [1] y) x) + // match: (LEAQ1 [c] {s} (SHLQconst [1] x) y) // cond: - // result: (LEAQ2 [c] {s} x y) + // result: (LEAQ2 [c] {s} y x) for { c := v.AuxInt s := v.Aux @@ -3548,13 +3414,13 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { if v_0.AuxInt != 1 { break } - y := v_0.Args[0] - x := v.Args[1] + x := v_0.Args[0] + y := v.Args[1] v.reset(OpAMD64LEAQ2) v.AuxInt = c v.Aux = s - v.AddArg(x) v.AddArg(y) + v.AddArg(x) return true } // match: (LEAQ1 [c] {s} x (SHLQconst [2] y)) @@ -3579,9 +3445,9 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { v.AddArg(y) return true } - // match: (LEAQ1 [c] {s} (SHLQconst [2] y) x) + // match: (LEAQ1 [c] {s} (SHLQconst [2] x) y) // cond: - // result: (LEAQ4 [c] {s} x y) + // result: (LEAQ4 [c] {s} y x) for { c := v.AuxInt s := v.Aux @@ -3592,13 +3458,13 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { if v_0.AuxInt != 2 { break } - y := v_0.Args[0] - x := v.Args[1] + x := v_0.Args[0] + y := v.Args[1] v.reset(OpAMD64LEAQ4) v.AuxInt = c v.Aux = s - v.AddArg(x) v.AddArg(y) + v.AddArg(x) return true } // match: (LEAQ1 [c] {s} x (SHLQconst [3] y)) @@ -3623,9 +3489,9 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { v.AddArg(y) return true } - // match: (LEAQ1 [c] {s} (SHLQconst [3] y) x) + // match: (LEAQ1 [c] {s} (SHLQconst [3] x) y) // cond: - // result: (LEAQ8 [c] {s} x y) + // result: (LEAQ8 [c] {s} y x) for { c := v.AuxInt s := v.Aux @@ -3636,13 +3502,13 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { if v_0.AuxInt != 3 { break } - y := v_0.Args[0] - x := v.Args[1] + x := v_0.Args[0] + y := v.Args[1] v.reset(OpAMD64LEAQ8) v.AuxInt = c v.Aux = s - v.AddArg(x) v.AddArg(y) + v.AddArg(x) return true } // 
match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) @@ -3669,21 +3535,21 @@ func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool { v.AddArg(y) return true } - // match: (LEAQ1 [off1] {sym1} y (LEAQ [off2] {sym2} x)) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB + // match: (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y)) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) for { off1 := v.AuxInt sym1 := v.Aux - y := v.Args[0] + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpAMD64LEAQ { break } off2 := v_1.AuxInt sym2 := v_1.Aux - x := v_1.Args[0] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { + y := v_1.Args[0] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) { break } v.reset(OpAMD64LEAQ1) @@ -4327,7 +4193,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { v.AddArg(x) return true } - // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) + // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVBload [off1+off2] {sym} ptr mem) for { @@ -4350,7 +4216,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { @@ -4424,7 +4290,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) // cond: canMergeSym(sym1, sym2) // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { @@ -4448,7 +4314,7 @@ func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) + // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVBload [off1+off2] {sym} ptr mem) for { @@ -4496,28 +4362,6 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) - // cond: - // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVBloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) // cond: // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) @@ -4540,28 +4384,6 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) - // cond: - // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVBloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } return false } func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { @@ -4609,7 +4431,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool 
{ v.AddArg(mem) return true } - // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) // cond: is32Bit(off1+off2) // result: (MOVBstore [off1+off2] {sym} ptr val mem) for { @@ -4657,7 +4479,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { @@ -4737,7 +4559,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem)) + // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem)) // cond: x0.Uses == 1 && clobber(x0) // result: (MOVWstore [i-1] {s} p (ROLWconst [8] w) mem) for { @@ -4783,7 +4605,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem)))) + // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem)))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) // result: (MOVLstore [i-3] {s} p (BSWAPL w) mem) for { @@ -4874,7 +4696,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) + // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem)))))))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) // result: (MOVQstore [i-7] {s} p (BSWAPQ w) mem) for { @@ -5148,7 +4970,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) // cond: canMergeSym(sym1, sym2) // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { @@ -5174,7 +4996,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) + // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem) // cond: is32Bit(off1+off2) // result: (MOVBstore [off1+off2] {sym} ptr val mem) for { @@ -5482,30 +5304,6 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.AddArg(mem) return true } - // match: 
(MOVBstoreidx1 [c] {sym} idx (ADDQconst [d] ptr) val mem) - // cond: - // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVBstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) // cond: // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) @@ -5530,82 +5328,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] idx) ptr val mem) - // cond: - // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVBstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem)) - // cond: x0.Uses == 1 && clobber(x0) - // result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst [8] w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x0 := v.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-1 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRWconst { - break - } - if x0_2.AuxInt != 8 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && clobber(x0)) { - break - } - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) - v0.AuxInt = 8 - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} idx p (SHRWconst [8] w) mem)) + // match: (MOVBstoreidx1 [i] {s} p idx w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem)) // cond: x0.Uses == 1 && clobber(x0) // result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst [8] w) mem) for { @@ -5624,57 +5347,6 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { if x0.Aux != s { break } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRWconst { - break - } - if x0_2.AuxInt != 8 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && clobber(x0)) { - break - } - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) - v0.AuxInt = 8 - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x0:(MOVBstoreidx1 [i-1] {s} p idx (SHRWconst [8] w) mem)) - // cond: x0.Uses == 1 && clobber(x0) - // result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst [8] w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x0 := v.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-1 { - break - } - if x0.Aux != s { - break - } if p != x0.Args[0] { break } @@ -5707,58 +5379,7 @@ 
func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} idx p w x0:(MOVBstoreidx1 [i-1] {s} idx p (SHRWconst [8] w) mem)) - // cond: x0.Uses == 1 && clobber(x0) - // result: (MOVWstoreidx1 [i-1] {s} p idx (ROLWconst [8] w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x0 := v.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-1 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRWconst { - break - } - if x0_2.AuxInt != 8 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && clobber(x0)) { - break - } - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, w.Type) - v0.AuxInt = 8 - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem)))) + // match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem)))) // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL w) mem) for { @@ -5860,122 +5481,124 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} idx p (SHRLconst [24] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL w) mem) + // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) + // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) for { i := v.AuxInt s := v.Aux p := v.Args[0] idx := v.Args[1] w := v.Args[2] - x2 := v.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { + x6 := v.Args[3] + if x6.Op != OpAMD64MOVBstoreidx1 { break } - if x2.AuxInt != i-1 { + if x6.AuxInt != i-1 { break } - if x2.Aux != s { + if x6.Aux != s { break } - if p != x2.Args[0] { + if p != x6.Args[0] { break } - if idx != x2.Args[1] { + if idx != x6.Args[1] { break } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRLconst { + x6_2 := x6.Args[2] + if x6_2.Op != OpAMD64SHRQconst { break } - if x2_2.AuxInt != 8 { + if x6_2.AuxInt != 8 { break } - if w != x2_2.Args[0] { + if w != x6_2.Args[0] { break } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { + x5 := x6.Args[3] + if x5.Op != 
OpAMD64MOVBstoreidx1 { break } - if x1.AuxInt != i-2 { + if x5.AuxInt != i-2 { break } - if x1.Aux != s { + if x5.Aux != s { break } - if p != x1.Args[0] { + if p != x5.Args[0] { break } - if idx != x1.Args[1] { + if idx != x5.Args[1] { break } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRLconst { + x5_2 := x5.Args[2] + if x5_2.Op != OpAMD64SHRQconst { break } - if x1_2.AuxInt != 16 { + if x5_2.AuxInt != 16 { break } - if w != x1_2.Args[0] { + if w != x5_2.Args[0] { break } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { + x4 := x5.Args[3] + if x4.Op != OpAMD64MOVBstoreidx1 { break } - if x0.AuxInt != i-3 { + if x4.AuxInt != i-3 { break } - if x0.Aux != s { + if x4.Aux != s { break } - if idx != x0.Args[0] { + if p != x4.Args[0] { break } - if p != x0.Args[1] { + if idx != x4.Args[1] { break } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRLconst { + x4_2 := x4.Args[2] + if x4_2.Op != OpAMD64SHRQconst { break } - if x0_2.AuxInt != 24 { + if x4_2.AuxInt != 24 { break } - if w != x0_2.Args[0] { + if w != x4_2.Args[0] { break } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + x3 := x4.Args[3] + if x3.Op != OpAMD64MOVBstoreidx1 { break } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 3 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} idx p (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x2 := v.Args[3] + if x3.AuxInt != i-4 { + break + } + if x3.Aux != s { + break + } + if p != x3.Args[0] { + break + } + if idx != x3.Args[1] { + break + } + x3_2 := x3.Args[2] + if x3_2.Op != OpAMD64SHRQconst { + break + } + if x3_2.AuxInt != 32 { + break + } + if w != x3_2.Args[0] { + break + } + x2 := x3.Args[3] if x2.Op != OpAMD64MOVBstoreidx1 { break } - if x2.AuxInt != i-1 { + if x2.AuxInt != i-5 { break } if x2.Aux != s { @@ -5988,10 +5611,10 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { break } x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRLconst { + if x2_2.Op != OpAMD64SHRQconst { break } - if x2_2.AuxInt != 8 { + if x2_2.AuxInt != 40 { break } if w != x2_2.Args[0] { @@ -6001,23 +5624,23 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { if x1.Op != OpAMD64MOVBstoreidx1 { break } - if x1.AuxInt != i-2 { + if x1.AuxInt != i-6 { break } if x1.Aux != s { break } - if idx != x1.Args[0] { + if p != x1.Args[0] { break } - if p != x1.Args[1] { + if idx != x1.Args[1] { break } x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRLconst { + if x1_2.Op != OpAMD64SHRQconst { break } - if x1_2.AuxInt != 16 { + if x1_2.AuxInt != 48 { break } if w != x1_2.Args[0] { @@ -6027,7 +5650,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { if x0.Op != OpAMD64MOVBstoreidx1 { break } - if x0.AuxInt != i-3 { + if x0.AuxInt != i-7 { break } if x0.Aux != s { @@ -6040,75795 +5663,8268 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value) bool { break } x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRLconst { + if x0_2.Op != OpAMD64SHRQconst { break } - if x0_2.AuxInt != 24 { + if x0_2.AuxInt != 56 { break 
} if w != x0_2.Args[0] { break } mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { break } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 3 + v.reset(OpAMD64MOVQstoreidx1) + v.AuxInt = i - 7 v.Aux = s v.AddArg(p) v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) + v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) v0.AddArg(w) v.AddArg(v0) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} idx p (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} idx p (SHRLconst [24] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL w) mem) + // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) for { i := v.AuxInt s := v.Aux p := v.Args[0] idx := v.Args[1] - w := v.Args[2] - x2 := v.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-1 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRLconst { - break - } - if x2_2.AuxInt != 8 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-2 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRLconst { - break - } - if x1_2.AuxInt != 16 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { break } - if x0.AuxInt != i-3 { + if v_2.AuxInt != 8 { break } - if x0.Aux != s { + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVBstoreidx1 { break } - if idx != x0.Args[0] { + if x.AuxInt != i-1 { break } - if p != x0.Args[1] { + if x.Aux != s { break } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRLconst { + if p != x.Args[0] { break } - if x0_2.AuxInt != 24 { + if idx != x.Args[1] { break } - if w != x0_2.Args[0] { + if w != x.Args[2] { break } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 3 + v.reset(OpAMD64MOVWstoreidx1) + v.AuxInt = i - 1 v.Aux = s v.AddArg(p) v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) - v0.AddArg(w) - v.AddArg(v0) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} idx p (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL w) mem) + // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem)) + // cond: x.Uses == 1 && clobber(x) 
+ // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) for { i := v.AuxInt s := v.Aux p := v.Args[0] idx := v.Args[1] - w := v.Args[2] - x2 := v.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-1 { + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { break } - if x2.Aux != s { + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVBstoreidx1 { break } - if idx != x2.Args[0] { + if x.AuxInt != i-1 { break } - if p != x2.Args[1] { + if x.Aux != s { break } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRLconst { + if p != x.Args[0] { break } - if x2_2.AuxInt != 8 { + if idx != x.Args[1] { break } - if w != x2_2.Args[0] { + w0 := x.Args[2] + if w0.Op != OpAMD64SHRQconst { break } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { + if w0.AuxInt != j-8 { break } - if x1.AuxInt != i-2 { + if w != w0.Args[0] { break } - if x1.Aux != s { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - if p != x1.Args[0] { + v.reset(OpAMD64MOVWstoreidx1) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool { + b := v.Block + _ = b + // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVLQSXload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpAMD64MOVLload { break } - if idx != x1.Args[1] { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRLconst { + b = x.Block + v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVLQSXload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpAMD64MOVQload { break } - if x1_2.AuxInt != 16 { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - if w != x1_2.Args[0] { + b = x.Block + v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVLQSX (ANDLconst [c] x)) + // cond: c & 0x80000000 == 0 + // result: (ANDLconst [c & 0x7fffffff] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64ANDLconst { break } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { + c := v_0.AuxInt + x := v_0.Args[0] + if !(c&0x80000000 == 0) { break } - if x0.AuxInt != i-3 { + v.reset(OpAMD64ANDLconst) + v.AuxInt = c & 0x7fffffff + v.AddArg(x) + return true + } + // match: (MOVLQSX x:(MOVLQSX _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpAMD64MOVLQSX { break } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRLconst { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVLQSX x:(MOVWQSX _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpAMD64MOVWQSX { break } - if x0_2.AuxInt != 24 { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVLQSX x:(MOVBQSX _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpAMD64MOVBQSX { break } - if w != x0_2.Args[0] { + v.reset(OpCopy) + v.Type = 
x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool { + // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 3 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) - v0.AddArg(w) - v.AddArg(v0) + v.reset(OpAMD64MOVLQSXload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} idx p (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} idx p (SHRLconst [24] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL w) mem) + return false +} +func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { + b := v.Block + _ = b + // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVLload [off] {sym} ptr mem) for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x2 := v.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-1 { + x := v.Args[0] + if x.Op != OpAMD64MOVLload { break } - if x2.Aux != s { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - if idx != x2.Args[0] { + b = x.Block + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVLload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpAMD64MOVQload { break } - if p != x2.Args[1] { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRLconst { + b = x.Block + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVLloadidx1 [off] {sym} ptr idx mem) + for { + x := v.Args[0] + if x.Op != OpAMD64MOVLloadidx1 { break } - if x2_2.AuxInt != 8 { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - if w != x2_2.Args[0] { + b = x.Block + v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVLloadidx4 [off] {sym} ptr idx mem) + for { + x := 
v.Args[0] + if x.Op != OpAMD64MOVLloadidx4 { break } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - if x1.AuxInt != i-2 { + b = x.Block + v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + // match: (MOVLQZX (ANDLconst [c] x)) + // cond: + // result: (ANDLconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64ANDLconst { break } - if x1.Aux != s { + c := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpAMD64ANDLconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MOVLQZX x:(MOVLQZX _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpAMD64MOVLQZX { break } - if p != x1.Args[0] { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVLQZX x:(MOVWQZX _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpAMD64MOVWQZX { break } - if idx != x1.Args[1] { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVLQZX x:(MOVBQZX _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpAMD64MOVBQZX { break } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRLconst { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool { + // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVLatomicload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if x1_2.AuxInt != 16 { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1 + off2)) { break } - if w != x1_2.Args[0] { + v.reset(OpAMD64MOVLatomicload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if x0.AuxInt != i-3 { + v.reset(OpAMD64MOVLatomicload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { + // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVLstore { break } - if x0.Aux != s { + off2 := v_1.AuxInt + sym2 := v_1.Aux + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - if idx != x0.Args[0] { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVLload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst 
{ break } - if p != x0.Args[1] { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1 + off2)) { break } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRLconst { + v.reset(OpAMD64MOVLload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - if x0_2.AuxInt != 24 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if w != x0_2.Args[0] { + v.reset(OpAMD64MOVLload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ1 { break } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 3 - v.Aux = s - v.AddArg(p) + v.reset(OpAMD64MOVLloadidx1) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) - v0.AddArg(w) - v.AddArg(v0) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} idx p (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} idx p (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL w) mem) + // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x2 := v.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-1 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRLconst { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ4 { break } - if x2_2.AuxInt != 8 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if w != x2_2.Args[0] { + v.reset(OpAMD64MOVLloadidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem) + // cond: ptr.Op != OpSB + // result: (MOVLloadidx1 [off] {sym} ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQ { break } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(ptr.Op != 
OpSB) { break } - if x1.AuxInt != i-2 { + v.reset(OpAMD64MOVLloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAL { break } - if x1.Aux != s { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { break } - if idx != x1.Args[0] { + v.reset(OpAMD64MOVLload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVLload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDLconst { break } - if p != x1.Args[1] { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1 + off2)) { break } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRLconst { + v.reset(OpAMD64MOVLload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool { + // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) + // cond: + // result: (MOVLloadidx4 [c] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLQconst { break } - if x1_2.AuxInt != 16 { + if v_1.AuxInt != 2 { break } - if w != x1_2.Args[0] { + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVLloadidx4) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) + // cond: + // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVLloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) + // cond: + // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - if x0.AuxInt != i-3 { + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVLloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value) bool { + // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) + // cond: + // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if x0.Aux != s { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVLloadidx4) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) + // cond: + // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != 
OpAMD64ADDQconst { break } - if p != x0.Args[0] { + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVLloadidx4) + v.AuxInt = c + 4*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { + // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) + // cond: + // result: (MOVLstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVLQSX { break } - if idx != x0.Args[1] { + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVLstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) + // cond: + // result: (MOVLstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVLQZX { break } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRLconst { + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVLstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVLstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if x0_2.AuxInt != 24 { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { break } - if w != x0_2.Args[0] { + v.reset(OpAMD64MOVLstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) + // cond: validOff(off) + // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVLconst { break } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + c := v_1.AuxInt + mem := v.Args[2] + if !(validOff(off)) { break } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 3 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) - v0.AddArg(w) - v.AddArg(v0) + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = makeValAndOff(int64(int32(c)), off) + v.Aux = sym + v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} p idx w x2:(MOVBstoreidx1 [i-1] {s} idx p (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} idx p (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} idx p (SHRLconst [24] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL w) mem) + // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x2 := v.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - if x2.AuxInt != i-1 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if x2.Aux != s { + 
v.reset(OpAMD64MOVLstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ1 { break } - if idx != x2.Args[0] { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if p != x2.Args[1] { + v.reset(OpAMD64MOVLstoreidx1) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ4 { break } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRLconst { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if x2_2.AuxInt != 8 { + v.reset(OpAMD64MOVLstoreidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) + // cond: ptr.Op != OpSB + // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQ { break } - if w != x2_2.Args[0] { + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(ptr.Op != OpSB) { break } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { + v.reset(OpAMD64MOVLstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVQstore [i-4] {s} p w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHRQconst { break } - if x1.AuxInt != i-2 { + if v_1.AuxInt != 32 { break } - if x1.Aux != s { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpAMD64MOVLstore { break } - if idx != x1.Args[0] { + if x.AuxInt != i-4 { break } - if p != x1.Args[1] { + if x.Aux != s { break } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRLconst { + if p != x.Args[0] { break } - if x1_2.AuxInt != 16 { + if w != x.Args[1] { break } - if w != x1_2.Args[0] { + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { + v.reset(OpAMD64MOVQstore) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVQstore [i-4] {s} p w0 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHRQconst { break } - if x0.AuxInt != i-3 { + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpAMD64MOVLstore { break } - if 
x0.Aux != s { + if x.AuxInt != i-4 { break } - if idx != x0.Args[0] { + if x.Aux != s { break } - if p != x0.Args[1] { + if p != x.Args[0] { break } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRLconst { + w0 := x.Args[1] + if w0.Op != OpAMD64SHRQconst { break } - if x0_2.AuxInt != 24 { + if w0.AuxInt != j-32 { break } - if w != x0_2.Args[0] { + if w != w0.Args[0] { break } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 3 + v.reset(OpAMD64MOVQstore) + v.AuxInt = i - 4 v.Aux = s v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) - v0.AddArg(w) - v.AddArg(v0) + v.AddArg(w0) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} idx p w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL w) mem) + // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x2 := v.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAL { break } - if x2.AuxInt != i-1 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { break } - if x2.Aux != s { + v.reset(OpAMD64MOVLstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVLstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDLconst { break } - if p != x2.Args[0] { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { break } - if idx != x2.Args[1] { + v.reset(OpAMD64MOVLstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { + b := v.Block + _ = b + types := &b.Func.Config.Types + _ = types + // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + for { + sc := v.AuxInt + s := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRLconst { + off := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(sc).canAdd(off)) { break } - if x2_2.AuxInt != 8 { + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) + // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - if w != 
x2_2.Args[0] { + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + x := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ1 { break } - if x1.AuxInt != i-2 { + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { break } - if x1.Aux != s { + v.reset(OpAMD64MOVLstoreconstidx1) + v.AuxInt = ValAndOff(x).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + x := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ4 { break } - if p != x1.Args[0] { + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { break } - if idx != x1.Args[1] { + v.reset(OpAMD64MOVLstoreconstidx4) + v.AuxInt = ValAndOff(x).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) + // cond: + // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQ { break } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRLconst { + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + v.reset(OpAMD64MOVLstoreconstidx1) + v.AuxInt = x + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) + // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) + for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64MOVLstoreconst { break } - if x1_2.AuxInt != 16 { + a := x.AuxInt + if x.Aux != s { break } - if w != x1_2.Args[0] { + if p != x.Args[0] { break } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { + mem := x.Args[1] + if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { break } - if x0.AuxInt != i-3 { + v.reset(OpAMD64MOVQstore) + v.AuxInt = ValAndOff(a).Off() + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64) + v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) + // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAL { break } - if x0.Aux != s { + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, 
sym2) && ValAndOff(sc).canAdd(off)) { break } - if p != x0.Args[0] { + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + for { + sc := v.AuxInt + s := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDLconst { break } - if idx != x0.Args[1] { + off := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(sc).canAdd(off)) { break } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRLconst { + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool { + b := v.Block + _ = b + types := &b.Func.Config.Types + _ = types + // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) + // cond: + // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLQconst { break } - if x0_2.AuxInt != 24 { + if v_1.AuxInt != 2 { break } - if w != x0_2.Args[0] { + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVLstoreconstidx4) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) + // cond: + // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + c := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVLstoreconstidx1) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) + // cond: + // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 3 - v.Aux = s - v.AddArg(p) + c := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVLstoreconstidx1) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) - v0.AddArg(w) - v.AddArg(v0) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} idx p w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} idx p (SHRLconst [24] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL w) mem) + // match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) + // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) for { - i := v.AuxInt + c := v.AuxInt s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x2 := v.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { + p := v.Args[0] + i := 
v.Args[1] + x := v.Args[2] + if x.Op != OpAMD64MOVLstoreconstidx1 { break } - if x2.AuxInt != i-1 { + a := x.AuxInt + if x.Aux != s { break } - if x2.Aux != s { + if p != x.Args[0] { break } - if p != x2.Args[0] { + if i != x.Args[1] { break } - if idx != x2.Args[1] { + mem := x.Args[2] + if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { break } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRLconst { + v.reset(OpAMD64MOVQstoreidx1) + v.AuxInt = ValAndOff(a).Off() + v.Aux = s + v.AddArg(p) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64) + v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value) bool { + b := v.Block + _ = b + types := &b.Func.Config.Types + _ = types + // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) + // cond: + // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if x2_2.AuxInt != 8 { + c := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVLstoreconstidx4) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) + // cond: + // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - if w != x2_2.Args[0] { + c := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVLstoreconstidx4) + v.AuxInt = ValAndOff(x).add(4 * c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) + // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) + for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + i := v.Args[1] + x := v.Args[2] + if x.Op != OpAMD64MOVLstoreconstidx4 { break } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { + a := x.AuxInt + if x.Aux != s { break } - if x1.AuxInt != i-2 { + if p != x.Args[0] { break } - if x1.Aux != s { + if i != x.Args[1] { break } - if p != x1.Args[0] { + mem := x.Args[2] + if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { break } - if idx != x1.Args[1] { + v.reset(OpAMD64MOVQstoreidx1) + v.AuxInt = ValAndOff(a).Off() + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) + v0.AuxInt = 2 + v0.AddArg(i) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64) + v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 + v.AddArg(v1) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { + // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) + // cond: + // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLQconst { break } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRLconst { + if v_1.AuxInt != 2 { break } - if x1_2.AuxInt != 16 { + idx := v_1.Args[0] + val := v.Args[2] + 
mem := v.Args[3] + v.reset(OpAMD64MOVLstoreidx4) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) + // cond: + // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if w != x1_2.Args[0] { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVLstoreidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) + // cond: + // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVLstoreidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVQstoreidx1 [i-4] {s} p idx w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { break } - if x0.AuxInt != i-3 { + if v_2.AuxInt != 32 { break } - if x0.Aux != s { + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVLstoreidx1 { break } - if idx != x0.Args[0] { + if x.AuxInt != i-4 { break } - if p != x0.Args[1] { + if x.Aux != s { break } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRLconst { + if p != x.Args[0] { break } - if x0_2.AuxInt != 24 { + if idx != x.Args[1] { break } - if w != x0_2.Args[0] { + if w != x.Args[2] { break } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 3 + v.reset(OpAMD64MOVQstoreidx1) + v.AuxInt = i - 4 v.Aux = s v.AddArg(p) v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) - v0.AddArg(w) - v.AddArg(v0) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} idx p w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} idx p (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL w) mem) + // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem) for { i := v.AuxInt s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x2 := v.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { break } - if x2.AuxInt != i-1 { + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVLstoreidx1 { break } - if x2.Aux != s { + if x.AuxInt != i-4 { break } - if p != x2.Args[0] { + if x.Aux != s { break } - if idx != x2.Args[1] { + if p != x.Args[0] { break } - x2_2 := x2.Args[2] - if x2_2.Op 
!= OpAMD64SHRLconst { + if idx != x.Args[1] { break } - if x2_2.AuxInt != 8 { + w0 := x.Args[2] + if w0.Op != OpAMD64SHRQconst { break } - if w != x2_2.Args[0] { + if w0.AuxInt != j-32 { break } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { + if w != w0.Args[0] { break } - if x1.AuxInt != i-2 { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - if x1.Aux != s { + v.reset(OpAMD64MOVQstoreidx1) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool { + b := v.Block + _ = b + // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) + // cond: + // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if idx != x1.Args[0] { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVLstoreidx4) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) + // cond: + // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - if p != x1.Args[1] { + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVLstoreidx4) + v.AuxInt = c + 4*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst [2] idx) w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { break } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRLconst { + if v_2.AuxInt != 32 { break } - if x1_2.AuxInt != 16 { + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVLstoreidx4 { break } - if w != x1_2.Args[0] { + if x.AuxInt != i-4 { break } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-3 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { + if x.Aux != s { break } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRLconst { + if p != x.Args[0] { break } - if x0_2.AuxInt != 24 { + if idx != x.Args[1] { break } - if w != x0_2.Args[0] { + if w != x.Args[2] { break } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 3 + v.reset(OpAMD64MOVQstoreidx1) + v.AuxInt = i - 4 v.Aux = s v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) - v0.AddArg(w) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) + v0.AuxInt = 2 + v0.AddArg(idx) v.AddArg(v0) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} idx p w x2:(MOVBstoreidx1 [i-1] {s} p idx (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} idx p (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} idx p (SHRLconst [24] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL w) mem) 
+ // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst [2] idx) w0 mem) for { i := v.AuxInt s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x2 := v.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-1 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRLconst { - break - } - if x2_2.AuxInt != 8 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-2 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRLconst { - break - } - if x1_2.AuxInt != 16 { - break - } - if w != x1_2.Args[0] { + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { break } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVLstoreidx4 { break } - if x0.AuxInt != i-3 { + if x.AuxInt != i-4 { break } - if x0.Aux != s { + if x.Aux != s { break } - if idx != x0.Args[0] { + if p != x.Args[0] { break } - if p != x0.Args[1] { + if idx != x.Args[1] { break } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRLconst { + w0 := x.Args[2] + if w0.Op != OpAMD64SHRQconst { break } - if x0_2.AuxInt != 24 { + if w0.AuxInt != j-32 { break } - if w != x0_2.Args[0] { + if w != w0.Args[0] { break } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 3 + v.reset(OpAMD64MOVQstoreidx1) + v.AuxInt = i - 4 v.Aux = s v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) - v0.AddArg(w) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) + v0.AuxInt = 2 + v0.AddArg(idx) v.AddArg(v0) + v.AddArg(w0) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} idx p w x2:(MOVBstoreidx1 [i-1] {s} idx p (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL w) mem) + return false +} +func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool { + // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVOload [off1+off2] {sym} ptr mem) for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x2 := v.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-1 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if p != x2.Args[1] { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1 + off2)) { break } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRLconst { + v.reset(OpAMD64MOVOload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && 
canMergeSym(sym1, sym2) + // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - if x2_2.AuxInt != 8 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if w != x2_2.Args[0] { + v.reset(OpAMD64MOVOload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool { + // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVOstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { break } - if x1.AuxInt != i-2 { + v.reset(OpAMD64MOVOstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - if x1.Aux != s { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if p != x1.Args[0] { + v.reset(OpAMD64MOVOstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool { + // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVQatomicload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if idx != x1.Args[1] { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1 + off2)) { break } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRLconst { + v.reset(OpAMD64MOVQatomicload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - if x1_2.AuxInt != 16 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if w != x1_2.Args[0] { + v.reset(OpAMD64MOVQatomicload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { + // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVQstore { break } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { + off2 := v_1.AuxInt + sym2 := v_1.Aux + 
ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - if x0.AuxInt != i-3 { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVQload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if x0.Aux != s { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1 + off2)) { break } - if p != x0.Args[0] { + v.reset(OpAMD64MOVQload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - if idx != x0.Args[1] { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRLconst { + v.reset(OpAMD64MOVQload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ1 { break } - if x0_2.AuxInt != 24 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if w != x0_2.Args[0] { + v.reset(OpAMD64MOVQloadidx1) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ8 { break } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 3 - v.Aux = s - v.AddArg(p) + v.reset(OpAMD64MOVQloadidx8) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) - v0.AddArg(w) - v.AddArg(v0) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} idx p w x2:(MOVBstoreidx1 [i-1] {s} idx p (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} p idx (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} idx p (SHRLconst [24] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL w) mem) + // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem) + // cond: ptr.Op != OpSB + // result: (MOVQloadidx1 [off] {sym} ptr idx mem) for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x2 := v.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { 
- break - } - if x2.AuxInt != i-1 { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQ { break } - if x2.Aux != s { + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(ptr.Op != OpSB) { break } - if idx != x2.Args[0] { + v.reset(OpAMD64MOVQloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAL { break } - if p != x2.Args[1] { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { break } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRLconst { + v.reset(OpAMD64MOVQload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVQload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDLconst { break } - if x2_2.AuxInt != 8 { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1 + off2)) { break } - if w != x2_2.Args[0] { + v.reset(OpAMD64MOVQload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value) bool { + // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) + // cond: + // result: (MOVQloadidx8 [c] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLQconst { break } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { + if v_1.AuxInt != 3 { break } - if x1.AuxInt != i-2 { + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVQloadidx8) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) + // cond: + // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRLconst { - break - } - if x1_2.AuxInt != 16 { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVQloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) + // cond: + // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - if w != x1_2.Args[0] { + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVQloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value) bool { + // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) + // cond: + // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - x0 := x1.Args[3] - if x0.Op != 
OpAMD64MOVBstoreidx1 { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVQloadidx8) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) + // cond: + // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - if x0.AuxInt != i-3 { + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVQloadidx8) + v.AuxInt = c + 8*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { + // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVQstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if x0.Aux != s { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { break } - if idx != x0.Args[0] { + v.reset(OpAMD64MOVQstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) + // cond: validValAndOff(c,off) + // result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVQconst { break } - if p != x0.Args[1] { + c := v_1.AuxInt + mem := v.Args[2] + if !(validValAndOff(c, off)) { break } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRLconst { + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - if x0_2.AuxInt != 24 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if w != x0_2.Args[0] { + v.reset(OpAMD64MOVQstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ1 { break } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 3 - v.Aux = s - v.AddArg(p) + v.reset(OpAMD64MOVQstoreidx1) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) - v0.AddArg(w) - v.AddArg(v0) + v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} idx p w x2:(MOVBstoreidx1 
[i-1] {s} idx p (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} idx p (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} p idx (SHRLconst [24] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL w) mem) + // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x2 := v.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-1 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRLconst { - break - } - if x2_2.AuxInt != 8 { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ8 { break } - if w != x2_2.Args[0] { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { + v.reset(OpAMD64MOVQstoreidx8) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) + // cond: ptr.Op != OpSB + // result: (MOVQstoreidx1 [off] {sym} ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQ { break } - if x1.AuxInt != i-2 { + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(ptr.Op != OpSB) { break } - if x1.Aux != s { + v.reset(OpAMD64MOVQstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAL { break } - if idx != x1.Args[0] { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { break } - if p != x1.Args[1] { + v.reset(OpAMD64MOVQstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVQstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDLconst { break } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRLconst { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { break } - if x1_2.AuxInt != 16 { + v.reset(OpAMD64MOVQstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { + // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + for { + sc := v.AuxInt + s := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if w != x1_2.Args[0] { + 
off := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(sc).canAdd(off)) { break } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) + // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - if x0.AuxInt != i-3 { + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } - if x0.Aux != s { + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + x := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ1 { break } - if p != x0.Args[0] { + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { break } - if idx != x0.Args[1] { + v.reset(OpAMD64MOVQstoreconstidx1) + v.AuxInt = ValAndOff(x).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + x := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ8 { break } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRLconst { + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { break } - if x0_2.AuxInt != 24 { + v.reset(OpAMD64MOVQstoreconstidx8) + v.AuxInt = ValAndOff(x).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) + // cond: + // result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQ { break } - if w != x0_2.Args[0] { + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + v.reset(OpAMD64MOVQstoreconstidx1) + v.AuxInt = x + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) + // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAL { break } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 3 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) - v0.AddArg(w) - v.AddArg(v0) + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = ValAndOff(sc).add(off) 
+ v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVBstoreidx1 [i] {s} idx p w x2:(MOVBstoreidx1 [i-1] {s} idx p (SHRLconst [8] w) x1:(MOVBstoreidx1 [i-2] {s} idx p (SHRLconst [16] w) x0:(MOVBstoreidx1 [i-3] {s} idx p (SHRLconst [24] w) mem)))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) - // result: (MOVLstoreidx1 [i-3] {s} p idx (BSWAPL w) mem) + // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) for { - i := v.AuxInt + sc := v.AuxInt s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x2 := v.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-1 { + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDLconst { break } - if x2.Aux != s { + off := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(sc).canAdd(off)) { break } - if idx != x2.Args[0] { + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value) bool { + // match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) + // cond: + // result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLQconst { break } - if p != x2.Args[1] { + if v_1.AuxInt != 3 { break } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRLconst { + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVQstoreconstidx8) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) + // cond: + // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if x2_2.AuxInt != 8 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-2 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRLconst { - break - } - if x1_2.AuxInt != 16 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-3 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRLconst { - break - } - if x0_2.AuxInt != 24 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2)) { - break - } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 3 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) 
mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) 
x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) 
x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - 
v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break 
- } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 
1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - 
break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != 
x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break 
- } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { 
- break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != 
OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if 
x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt 
!= i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if 
x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break 
- } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != 
x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - 
if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - 
if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := 
x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } 
- if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - 
break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux 
!= s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - 
idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && 
clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && 
x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst 
[40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) 
x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - 
} - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - 
v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && 
clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - 
mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - 
break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != 
OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if 
x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt 
!= i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if 
x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break 
- } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != 
x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - 
if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - 
if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := 
x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } 
- if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - 
break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux 
!= s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - 
if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - 
break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := 
v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // 
result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && 
x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p 
(SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p 
(SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] 
{s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - 
v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && 
clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses 
== 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if 
x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt 
!= i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if 
x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break 
- } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != 
x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - 
if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - 
if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := 
x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } 
- if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - 
break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux 
!= s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - 
if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - 
break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := 
x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } 
- x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - 
break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) 
mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && 
clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx 
(SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx 
(SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p 
(SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, 
w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) 
{ - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && 
x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if 
x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break 
- } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != 
x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } 
- if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - 
if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := 
x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } 
- if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - 
break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux 
!= s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - 
if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - 
break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := 
x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } 
- x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - 
break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != 
OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - 
break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := 
v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 
&& clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: 
x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx 
p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx 
(SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - 
v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - 
v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && 
x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - 
break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != 
x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - 
} - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - 
break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != 
OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if 
x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt 
!= i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if 
x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break 
- } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != 
x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - 
if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - 
if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := 
x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } 
- if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - 
break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux 
!= s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - 
idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && 
clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && 
x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst 
[40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) 
x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - 
} - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - 
v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && 
clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - 
mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - 
break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != 
OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if 
x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt 
!= i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if 
x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break 
- } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != 
x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - 
if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - 
if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := 
x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } 
- if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - 
break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux 
!= s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - 
if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - 
break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := 
v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // 
result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && 
x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx 
(SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx 
(SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] 
{s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - 
v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && 
clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses 
== 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if 
x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt 
!= i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if 
x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break 
- } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != 
x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } 
- if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - 
if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := 
x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } 
- if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - 
break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux 
!= s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - 
if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - 
break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := 
x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } 
- x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - 
break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) 
mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && 
clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p 
(SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx 
(SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx 
(SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, 
w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) 
{ - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && 
x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if 
x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break 
- } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != 
x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } 
- if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - 
if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := 
x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } 
- if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - 
break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux 
!= s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - 
if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - 
break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := 
x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } 
- x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - 
break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != 
OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - 
break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := 
v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) 
&& clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: 
x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p 
idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p 
(SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - 
v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - 
v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && 
x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} p idx (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if idx != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - 
break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != 
x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break 
- } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { 
- break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != 
OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if 
x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt 
!= i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if 
x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break 
- } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != 
x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - 
if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - 
if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := 
x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } 
- if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - 
break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux 
!= s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] 
- p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && 
clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && 
x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst 
[40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) 
x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - 
} - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - 
v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && 
clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - 
mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - 
break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != 
OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if 
x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt 
!= i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if 
x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break 
- } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != 
x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } 
- if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} p idx (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if idx != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - 
if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := 
x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } 
- if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - 
break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux 
!= s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - 
if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - 
break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := 
v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // 
result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && 
x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx 
(SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx 
(SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] 
{s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - 
v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && 
clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses 
== 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if 
x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} p idx (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if idx != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt 
!= i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if 
x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break 
- } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != 
x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - 
if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - 
if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := 
x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } 
- if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} p idx (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - 
break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if idx != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux 
!= s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - 
if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - 
break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} p idx (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := 
x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if idx != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } 
- x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} p idx (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - 
break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} p idx (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) 
mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p w x6:(MOVBstoreidx1 [i-1] {s} idx p (SHRQconst [8] w) x5:(MOVBstoreidx1 [i-2] {s} idx p (SHRQconst [16] w) x4:(MOVBstoreidx1 [i-3] {s} idx p (SHRQconst [24] w) x3:(MOVBstoreidx1 [i-4] {s} idx p (SHRQconst [32] w) x2:(MOVBstoreidx1 [i-5] {s} idx p (SHRQconst [40] w) x1:(MOVBstoreidx1 [i-6] {s} idx p (SHRQconst [48] w) x0:(MOVBstoreidx1 [i-7] {s} idx p (SHRQconst [56] w) mem)))))))) - // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && 
clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) - // result: (MOVQstoreidx1 [i-7] {s} p idx (BSWAPQ w) mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x6 := v.Args[3] - if x6.Op != OpAMD64MOVBstoreidx1 { - break - } - if x6.AuxInt != i-1 { - break - } - if x6.Aux != s { - break - } - if idx != x6.Args[0] { - break - } - if p != x6.Args[1] { - break - } - x6_2 := x6.Args[2] - if x6_2.Op != OpAMD64SHRQconst { - break - } - if x6_2.AuxInt != 8 { - break - } - if w != x6_2.Args[0] { - break - } - x5 := x6.Args[3] - if x5.Op != OpAMD64MOVBstoreidx1 { - break - } - if x5.AuxInt != i-2 { - break - } - if x5.Aux != s { - break - } - if idx != x5.Args[0] { - break - } - if p != x5.Args[1] { - break - } - x5_2 := x5.Args[2] - if x5_2.Op != OpAMD64SHRQconst { - break - } - if x5_2.AuxInt != 16 { - break - } - if w != x5_2.Args[0] { - break - } - x4 := x5.Args[3] - if x4.Op != OpAMD64MOVBstoreidx1 { - break - } - if x4.AuxInt != i-3 { - break - } - if x4.Aux != s { - break - } - if idx != x4.Args[0] { - break - } - if p != x4.Args[1] { - break - } - x4_2 := x4.Args[2] - if x4_2.Op != OpAMD64SHRQconst { - break - } - if x4_2.AuxInt != 24 { - break - } - if w != x4_2.Args[0] { - break - } - x3 := x4.Args[3] - if x3.Op != OpAMD64MOVBstoreidx1 { - break - } - if x3.AuxInt != i-4 { - break - } - if x3.Aux != s { - break - } - if idx != x3.Args[0] { - break - } - if p != x3.Args[1] { - break - } - x3_2 := x3.Args[2] - if x3_2.Op != OpAMD64SHRQconst { - break - } - if x3_2.AuxInt != 32 { - break - } - if w != x3_2.Args[0] { - break - } - x2 := x3.Args[3] - if x2.Op != OpAMD64MOVBstoreidx1 { - break - } - if x2.AuxInt != i-5 { - break - } - if x2.Aux != s { - break - } - if idx != x2.Args[0] { - break - } - if p != x2.Args[1] { - break - } - x2_2 := x2.Args[2] - if x2_2.Op != OpAMD64SHRQconst { - break - } - if x2_2.AuxInt != 40 { - break - } - if w != x2_2.Args[0] { - break - } - x1 := x2.Args[3] - if x1.Op != OpAMD64MOVBstoreidx1 { - break - } - if x1.AuxInt != i-6 { - break - } - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - x1_2 := x1.Args[2] - if x1_2.Op != OpAMD64SHRQconst { - break - } - if x1_2.AuxInt != 48 { - break - } - if w != x1_2.Args[0] { - break - } - x0 := x1.Args[3] - if x0.Op != OpAMD64MOVBstoreidx1 { - break - } - if x0.AuxInt != i-7 { - break - } - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - x0_2 := x0.Args[2] - if x0_2.Op != OpAMD64SHRQconst { - break - } - if x0_2.AuxInt != 56 { - break - } - if w != x0_2.Args[0] { - break - } - mem := x0.Args[3] - if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 7 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, w.Type) - v0.AddArg(w) - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - if v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - 
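// Illustrative sketch, not part of this patch: roughly the source-level shape
// that the MOVBstoreidx1 permutation rules in this hunk recognize, eight
// dependent one-byte stores of successive right shifts of w with the most
// significant byte at the lowest address. The matcher collapses such a chain
// into a single MOVQstoreidx1 of (BSWAPQ w). The function name is hypothetical.
func putUint64BE(b []byte, i int, w uint64) {
	b[i-7] = byte(w >> 56) // lowest address, most significant byte
	b[i-6] = byte(w >> 48)
	b[i-5] = byte(w >> 40)
	b[i-4] = byte(w >> 32)
	b[i-3] = byte(w >> 24)
	b[i-2] = byte(w >> 16)
	b[i-1] = byte(w >> 8)
	b[i] = byte(w) // highest address, least significant byte
}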
if x.Op != OpAMD64MOVBstoreidx1 { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - if v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVBstoreidx1 { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - if v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVBstoreidx1 { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p (SHRQconst [8] w) x:(MOVBstoreidx1 [i-1] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - if v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVBstoreidx1 { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVBstoreidx1 { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpAMD64SHRQconst { - break - } - if w0.AuxInt != 
j-8 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} idx p w0:(SHRQconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVBstoreidx1 { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpAMD64SHRQconst { - break - } - if w0.AuxInt != j-8 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} p idx w0:(SHRQconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVBstoreidx1 { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpAMD64SHRQconst { - break - } - if w0.AuxInt != j-8 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx1 [i] {s} idx p (SHRQconst [j] w) x:(MOVBstoreidx1 [i-1] {s} idx p w0:(SHRQconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx1 [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVBstoreidx1 { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpAMD64SHRQconst { - break - } - if w0.AuxInt != j-8 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool { - b := v.Block - _ = b - // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVLQSXload [off] {sym} ptr mem) - for { - x := v.Args[0] - if x.Op != OpAMD64MOVLload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) - 
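// Illustrative sketch, not part of this patch: the adjacent byte-store pair
// that the MOVWstoreidx1 rewrites here recognize, w at the lower address and
// w>>8 one byte above it, merged into one little-endian 2-byte store. The
// [j]/[j-8] variants handle the same pairing for interior bytes of a longer
// shift chain. The function name is hypothetical.
func putUint16LE(b []byte, i int, w uint16) {
	b[i-1] = byte(w)    // low byte at the lower address
	b[i] = byte(w >> 8) // high byte one above it
}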
v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVLQSXload [off] {sym} ptr mem) - for { - x := v.Args[0] - if x.Op != OpAMD64MOVQload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVLQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVLQSX (ANDLconst [c] x)) - // cond: c & 0x80000000 == 0 - // result: (ANDLconst [c & 0x7fffffff] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDLconst { - break - } - c := v_0.AuxInt - x := v_0.Args[0] - if !(c&0x80000000 == 0) { - break - } - v.reset(OpAMD64ANDLconst) - v.AuxInt = c & 0x7fffffff - v.AddArg(x) - return true - } - // match: (MOVLQSX x:(MOVLQSX _)) - // cond: - // result: x - for { - x := v.Args[0] - if x.Op != OpAMD64MOVLQSX { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MOVLQSX x:(MOVWQSX _)) - // cond: - // result: x - for { - x := v.Args[0] - if x.Op != OpAMD64MOVWQSX { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MOVLQSX x:(MOVBQSX _)) - // cond: - // result: x - for { - x := v.Args[0] - if x.Op != OpAMD64MOVBQSX { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool { - // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVLQSXload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool { - b := v.Block - _ = b - // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVLload [off] {sym} ptr mem) - for { - x := v.Args[0] - if x.Op != OpAMD64MOVLload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVLload [off] {sym} ptr mem) - for { - x := v.Args[0] - if x.Op != OpAMD64MOVQload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVLloadidx1 [off] {sym} ptr idx mem) - for { - x := 
v.Args[0] - if x.Op != OpAMD64MOVLloadidx1 { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - idx := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (MOVLQZX x:(MOVLloadidx4 [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVLloadidx4 [off] {sym} ptr idx mem) - for { - x := v.Args[0] - if x.Op != OpAMD64MOVLloadidx4 { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - idx := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (MOVLQZX (ANDLconst [c] x)) - // cond: - // result: (ANDLconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDLconst { - break - } - c := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpAMD64ANDLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (MOVLQZX x:(MOVLQZX _)) - // cond: - // result: x - for { - x := v.Args[0] - if x.Op != OpAMD64MOVLQZX { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MOVLQZX x:(MOVWQZX _)) - // cond: - // result: x - for { - x := v.Args[0] - if x.Op != OpAMD64MOVWQZX { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MOVLQZX x:(MOVBQZX _)) - // cond: - // result: x - for { - x := v.Args[0] - if x.Op != OpAMD64MOVBQZX { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool { - // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVLatomicload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVLatomicload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVLatomicload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool { - // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: x - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLstore { - break - } - off2 := v_1.AuxInt - sym2 := v_1.Aux - ptr2 := v_1.Args[0] - x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - 
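// Illustrative sketch, not part of this patch: the MOVLQSX/MOVLQZX rules here
// fold a 32-bit load followed by an extension to 64 bits into a single
// extending load (for zero extension, the plain MOVL load suffices, since it
// already clears the upper half on amd64). Names are hypothetical.
func widen(p *int32, q *uint32) (int64, uint64) {
	s := int64(*p)  // candidate for one MOVLQSXload
	z := uint64(*q) // candidate for a plain MOVLload
	return s, z
}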
return true - } - // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVLload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVLload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVLload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) - return true - } - // match: (MOVLload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ1 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVLloadidx1) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ4 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVLloadidx4) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVLloadidx1 [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVLloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAL { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVLload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) - return true - } - // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVLload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDLconst { - break - 
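// Illustrative sketch, not part of this patch: the MOVLload rules here fold a
// constant address computation (ADDQconst/LEAQ, or LEAL/ADDLconst for 32-bit
// pointers) into the load's displacement when the combined offset fits in 32
// bits. The type and function names are hypothetical.
type pair struct{ a, b int32 }

func loadB(p *pair) int32 {
	return p.b // *(p+4): the +4 is expected to fold into a single MOVL 4(p) load
}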
} - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVLload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVLloadidx1(v *Value) bool { - // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) - // cond: - // result: (MOVLloadidx4 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { - break - } - if v_1.AuxInt != 2 { - break - } - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVLloadidx4) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLloadidx1 [c] {sym} (SHLQconst [2] idx) ptr mem) - // cond: - // result: (MOVLloadidx4 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst { - break - } - if v_0.AuxInt != 2 { - break - } - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVLloadidx4) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: - // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVLloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) - // cond: - // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVLloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: - // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVLloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) - // cond: - // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVLloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value) bool { - // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: - // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVLloadidx4) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: - 
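// Illustrative sketch, not part of this patch: the MOVLloadidx1 rules here
// normalize index arithmetic; when the index is shifted left by 2
// (SHLQconst [2]), the load becomes MOVLloadidx4, so the *4 scale moves into
// the addressing mode. The function name is hypothetical.
func elem32(b []uint32, i int) uint32 {
	return b[i] // address base + 4*i: a candidate for scaled (idx4) addressing
}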
// result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVLloadidx4) - v.AuxInt = c + 4*d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool { - // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) - // cond: - // result: (MOVLstore [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLQSX { - break - } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVLstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) - // cond: - // result: (MOVLstore [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLQZX { - break - } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVLstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVLstore [off1+off2] {sym} ptr val mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVLstore) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) - // cond: validOff(off) - // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break - } - c := v_1.AuxInt - mem := v.Args[2] - if !(validOff(off)) { - break - } - v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = makeValAndOff(int64(int32(c)), off) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVLstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ1 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true 
- } - // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ4 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVLstoreidx4) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) - // cond: ptr.Op != OpSB - // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVQstore [i-4] {s} p w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRQconst { - break - } - if v_1.AuxInt != 32 { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpAMD64MOVLstore { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if w != x.Args[1] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVQstore) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVQstore [i-4] {s} p w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRQconst { - break - } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpAMD64MOVLstore { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - w0 := x.Args[1] - if w0.Op != OpAMD64SHRQconst { - break - } - if w0.AuxInt != j-32 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVQstore) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAL { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVLstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVLstore [off1+off2] {sym} ptr val mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDLconst 
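// Illustrative sketch, not part of this patch: the MOVLstore rules here merge
// two adjacent 32-bit stores of w and w>>32 into one 8-byte MOVQstore. The
// function name is hypothetical.
func splitStore(p *[2]uint32, w uint64) {
	p[0] = uint32(w)       // low half at the lower address
	p[1] = uint32(w >> 32) // high half 4 bytes above: together one MOVQstore candidate
}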
{ - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVLstore) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool { - b := v.Block - _ = b - types := &b.Func.Config.Types - _ = types - // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) - for { - sc := v.AuxInt - s := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - off := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(ValAndOff(sc).canAdd(off)) { - break - } - v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) - for { - sc := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { - break - } - v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVLstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVLstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) - for { - x := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ1 { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVLstoreconstidx1) - v.AuxInt = ValAndOff(x).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) - for { - x := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ4 { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVLstoreconstidx4) - v.AuxInt = ValAndOff(x).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLstoreconst [x] {sym} (ADDQ ptr idx) mem) - // cond: - // result: (MOVLstoreconstidx1 [x] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - v.reset(OpAMD64MOVLstoreconstidx1) - v.AuxInt = x - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem)) - // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) - // result: (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) - for { - c := v.AuxInt - s := v.Aux - p := v.Args[0] - x := v.Args[1] - 
if x.Op != OpAMD64MOVLstoreconst { - break - } - a := x.AuxInt - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - mem := x.Args[1] - if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { - break - } - v.reset(OpAMD64MOVQstore) - v.AuxInt = ValAndOff(a).Off() - v.Aux = s - v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64) - v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) - for { - sc := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAL { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { - break - } - v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) - for { - sc := v.AuxInt - s := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDLconst { - break - } - off := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(ValAndOff(sc).canAdd(off)) { - break - } - v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool { - b := v.Block - _ = b - types := &b.Func.Config.Types - _ = types - // match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) - // cond: - // result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { - break - } - if v_1.AuxInt != 2 { - break - } - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVLstoreconstidx4) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: - // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - c := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVLstoreconstidx1) - v.AuxInt = ValAndOff(x).add(c) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: - // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - c := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVLstoreconstidx1) - v.AuxInt = ValAndOff(x).add(c) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLstoreconstidx1 [c] {s} p i x:(MOVLstoreconstidx1 [a] {s} p i mem)) - // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) - // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p i (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) - for { - c := v.AuxInt 
- s := v.Aux - p := v.Args[0] - i := v.Args[1] - x := v.Args[2] - if x.Op != OpAMD64MOVLstoreconstidx1 { - break - } - a := x.AuxInt - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if i != x.Args[1] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = ValAndOff(a).Off() - v.Aux = s - v.AddArg(p) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64) - v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 - v.AddArg(v0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value) bool { - b := v.Block - _ = b - types := &b.Func.Config.Types - _ = types - // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: - // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - c := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVLstoreconstidx4) - v.AuxInt = ValAndOff(x).add(c) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: - // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - c := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVLstoreconstidx4) - v.AuxInt = ValAndOff(x).add(4 * c) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVLstoreconstidx4 [c] {s} p i x:(MOVLstoreconstidx4 [a] {s} p i mem)) - // cond: x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) - // result: (MOVQstoreidx1 [ValAndOff(a).Off()] {s} p (SHLQconst [2] i) (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) - for { - c := v.AuxInt - s := v.Aux - p := v.Args[0] - i := v.Args[1] - x := v.Args[2] - if x.Op != OpAMD64MOVLstoreconstidx4 { - break - } - a := x.AuxInt - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if i != x.Args[1] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = ValAndOff(a).Off() - v.Aux = s - v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) - v0.AuxInt = 2 - v0.AddArg(i) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64) - v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32 - v.AddArg(v1) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVLstoreidx1(v *Value) bool { - // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) - // cond: - // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { - break - } - if v_1.AuxInt != 2 { - break - } - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVLstoreidx4) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [c] {sym} (SHLQconst [2] idx) ptr val mem) - // cond: - // result: (MOVLstoreidx4 [c] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := 
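// Illustrative sketch, not part of this patch: the MOVLstoreconst(idx) rules
// here merge two adjacent 4-byte constant stores into a single 8-byte store
// whose immediate is low | high<<32; for example, 0x11111111 at offset 0 and
// 0x22222222 at offset 4 become one MOVQstore of 0x2222222211111111. The
// function name is hypothetical.
func initPair(p *[2]uint32) {
	p[0] = 0x11111111
	p[1] = 0x22222222
}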
v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst { - break - } - if v_0.AuxInt != 2 { - break - } - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVLstoreidx4) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: - // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [c] {sym} idx (ADDQconst [d] ptr) val mem) - // cond: - // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: - // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] idx) ptr val mem) - // cond: - // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVQstoreidx1 [i-4] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - if v_2.AuxInt != 32 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVLstoreidx1 { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVQstoreidx1 [i-4] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - if v_2.AuxInt != 32 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVLstoreidx1 { - break - } - if x.AuxInt != 
i-4 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [i] {s} idx p (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVQstoreidx1 [i-4] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - if v_2.AuxInt != 32 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVLstoreidx1 { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [i] {s} idx p (SHRQconst [32] w) x:(MOVLstoreidx1 [i-4] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVQstoreidx1 [i-4] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - if v_2.AuxInt != 32 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVLstoreidx1 { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVLstoreidx1 { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpAMD64SHRQconst { - break - } - if w0.AuxInt != j-32 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} idx p w0:(SHRQconst [j-32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVLstoreidx1 { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op 
!= OpAMD64SHRQconst { - break - } - if w0.AuxInt != j-32 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [i] {s} idx p (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVLstoreidx1 { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpAMD64SHRQconst { - break - } - if w0.AuxInt != j-32 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx1 [i] {s} idx p (SHRQconst [j] w) x:(MOVLstoreidx1 [i-4] {s} idx p w0:(SHRQconst [j-32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVQstoreidx1 [i-4] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVLstoreidx1 { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpAMD64SHRQconst { - break - } - if w0.AuxInt != j-32 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value) bool { - b := v.Block - _ = b - // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: - // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVLstoreidx4) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: - // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVLstoreidx4) - v.AuxInt = c + 4*d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [32] w) x:(MOVLstoreidx4 [i-4] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst [2] idx) w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if 
v_2.Op != OpAMD64SHRQconst { - break - } - if v_2.AuxInt != 32 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVLstoreidx4 { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) - v0.AuxInt = 2 - v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVLstoreidx4 [i] {s} p idx (SHRQconst [j] w) x:(MOVLstoreidx4 [i-4] {s} p idx w0:(SHRQconst [j-32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVQstoreidx1 [i-4] {s} p (SHLQconst [2] idx) w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVLstoreidx4 { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpAMD64SHRQconst { - break - } - if w0.AuxInt != j-32 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) - v0.AuxInt = 2 - v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool { - // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVOload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVOload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVOload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool { - // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVOstore [off1+off2] {sym} ptr val mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVOstore) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVOstore [off1+off2] 
{mergeSym(sym1,sym2)} base val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVOstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool { - // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVQatomicload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVQatomicload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVQatomicload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool { - // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: x - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQstore { - break - } - off2 := v_1.AuxInt - sym2 := v_1.Aux - ptr2 := v_1.Args[0] - x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVQload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVQload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVQload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) - return true - } - // match: (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ1 { - break - } - off2 := 
v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVQloadidx1) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVQloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ8 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVQloadidx8) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQload [off] {sym} (ADDQ ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVQloadidx1 [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVQloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAL { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVQload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) - return true - } - // match: (MOVQload [off1] {sym} (ADDLconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVQload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDLconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVQload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVQloadidx1(v *Value) bool { - // match: (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) - // cond: - // result: (MOVQloadidx8 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { - break - } - if v_1.AuxInt != 3 { - break - } - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVQloadidx8) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem) - // cond: - // result: (MOVQloadidx8 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst { - break - } - if v_0.AuxInt != 3 { - break - } - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVQloadidx8) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: - // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := 
v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVQloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) - // cond: - // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVQloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: - // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVQloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) - // cond: - // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVQloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value) bool { - // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: - // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVQloadidx8) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: - // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVQloadidx8) - v.AuxInt = c + 8*d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool { - // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVQstore [off1+off2] {sym} ptr val mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVQstore) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) - // cond: validValAndOff(c,off) - // result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break - } - c := v_1.AuxInt - mem := v.Args[2] - if !(validValAndOff(c, off)) { - break - } - v.reset(OpAMD64MOVQstoreconst) - v.AuxInt = 
makeValAndOff(c, off) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVQstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ1 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVQstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ8 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVQstoreidx8) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVQstore [off] {sym} (ADDQ ptr idx) val mem) - // cond: ptr.Op != OpSB - // result: (MOVQstoreidx1 [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAL { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVQstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVQstore [off1] {sym} (ADDLconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVQstore [off1+off2] {sym} ptr val mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDLconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVQstore) - 
v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool { - // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) - for { - sc := v.AuxInt - s := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - off := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(ValAndOff(sc).canAdd(off)) { - break - } - v.reset(OpAMD64MOVQstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) - for { - sc := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { - break - } - v.reset(OpAMD64MOVQstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVQstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVQstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) - for { - x := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ1 { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVQstoreconstidx1) - v.AuxInt = ValAndOff(x).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) - for { - x := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ8 { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVQstoreconstidx8) - v.AuxInt = ValAndOff(x).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQstoreconst [x] {sym} (ADDQ ptr idx) mem) - // cond: - // result: (MOVQstoreconstidx1 [x] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - v.reset(OpAMD64MOVQstoreconstidx1) - v.AuxInt = x - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) - for { - sc := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAL { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { - break - } - v.reset(OpAMD64MOVQstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - 
v.AddArg(mem) - return true - } - // match: (MOVQstoreconst [sc] {s} (ADDLconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) - for { - sc := v.AuxInt - s := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDLconst { - break - } - off := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(ValAndOff(sc).canAdd(off)) { - break - } - v.reset(OpAMD64MOVQstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1(v *Value) bool { - // match: (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) - // cond: - // result: (MOVQstoreconstidx8 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { - break - } - if v_1.AuxInt != 3 { - break - } - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVQstoreconstidx8) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: - // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - c := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVQstoreconstidx1) - v.AuxInt = ValAndOff(x).add(c) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: - // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - c := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVQstoreconstidx1) - v.AuxInt = ValAndOff(x).add(c) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value) bool { - // match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: - // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - c := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVQstoreconstidx8) - v.AuxInt = ValAndOff(x).add(c) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: - // result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - c := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVQstoreconstidx8) - v.AuxInt = ValAndOff(x).add(8 * c) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool { - // match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) - // cond: - // result: (MOVQstoreidx8 [c] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { - break - } - if v_1.AuxInt != 3 { - break - } - idx := v_1.Args[0] - val := v.Args[2] - mem 
:= v.Args[3] - v.reset(OpAMD64MOVQstoreidx8) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVQstoreidx1 [c] {sym} (SHLQconst [3] idx) ptr val mem) - // cond: - // result: (MOVQstoreidx8 [c] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst { - break - } - if v_0.AuxInt != 3 { - break - } - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVQstoreidx8) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: - // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVQstoreidx1 [c] {sym} idx (ADDQconst [d] ptr) val mem) - // cond: - // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: - // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] idx) ptr val mem) - // cond: - // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVQstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value) bool { - // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: - // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVQstoreidx8) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: - // result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVQstoreidx8) - v.AuxInt = c + 8*d - 
v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { - // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVSDload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVSDload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVSDload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) - return true - } - // match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ1 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVSDloadidx1) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ8 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVSDloadidx8) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVSDloadidx1 [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVSDloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value) bool { - // match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) - // cond: - // result: (MOVSDloadidx8 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { - break - } - if v_1.AuxInt != 3 { - break - } - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVSDloadidx8) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: 
- // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVSDloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: - // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVSDloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value) bool { - // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: - // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVSDloadidx8) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: - // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVSDloadidx8) - v.AuxInt = c + 8*d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { - // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVSDstore [off1+off2] {sym} ptr val mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVSDstore) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVSDstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ1 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVSDstoreidx1) - v.AuxInt = off1 + 
off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ8 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVSDstoreidx8) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) - // cond: ptr.Op != OpSB - // result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVSDstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value) bool { - // match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) - // cond: - // result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { - break - } - if v_1.AuxInt != 3 { - break - } - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVSDstoreidx8) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: - // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVSDstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: - // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVSDstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value) bool { - // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: - // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVSDstoreidx8) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: - // result: (MOVSDstoreidx8 [c+8*d] {sym} 
ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVSDstoreidx8) - v.AuxInt = c + 8*d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { - // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVSSload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVSSload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVSSload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) - return true - } - // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ1 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVSSloadidx1) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ4 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVSSloadidx4) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVSSloadidx1 [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVSSloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value) bool { - // match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) - // cond: - // result: (MOVSSloadidx4 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { - break - } - if 
v_1.AuxInt != 2 { - break - } - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVSSloadidx4) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: - // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVSSloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: - // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVSSloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value) bool { - // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: - // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVSSloadidx4) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: - // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVSSloadidx4) - v.AuxInt = c + 4*d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { - // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVSSstore [off1+off2] {sym} ptr val mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVSSstore) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVSSstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if 
v_0.Op != OpAMD64LEAQ1 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVSSstoreidx1) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ4 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVSSstoreidx4) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) - // cond: ptr.Op != OpSB - // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVSSstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value) bool { - // match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) - // cond: - // result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { - break - } - if v_1.AuxInt != 2 { - break - } - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVSSstoreidx4) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: - // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVSSstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: - // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVSSstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value) bool { - // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: - // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := 
v.Args[3] - v.reset(OpAMD64MOVSSstoreidx4) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: - // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVSSstoreidx4) - v.AuxInt = c + 4*d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { - b := v.Block - _ = b - // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWQSXload [off] {sym} ptr mem) - for { - x := v.Args[0] - if x.Op != OpAMD64MOVWload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWQSXload [off] {sym} ptr mem) - for { - x := v.Args[0] - if x.Op != OpAMD64MOVLload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWQSXload [off] {sym} ptr mem) - for { - x := v.Args[0] - if x.Op != OpAMD64MOVQload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVWQSX (ANDLconst [c] x)) - // cond: c & 0x8000 == 0 - // result: (ANDLconst [c & 0x7fff] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDLconst { - break - } - c := v_0.AuxInt - x := v_0.Args[0] - if !(c&0x8000 == 0) { - break - } - v.reset(OpAMD64ANDLconst) - v.AuxInt = c & 0x7fff - v.AddArg(x) - return true - } - // match: (MOVWQSX x:(MOVWQSX _)) - // cond: - // result: x - for { - x := v.Args[0] - if x.Op != OpAMD64MOVWQSX { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MOVWQSX x:(MOVBQSX _)) - // cond: - // result: x - for { - x := v.Args[0] - if x.Op != OpAMD64MOVBQSX { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool { - // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, 
sym2)) { - break - } - v.reset(OpAMD64MOVWQSXload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { - b := v.Block - _ = b - // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWload [off] {sym} ptr mem) - for { - x := v.Args[0] - if x.Op != OpAMD64MOVWload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWload [off] {sym} ptr mem) - for { - x := v.Args[0] - if x.Op != OpAMD64MOVLload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWload [off] {sym} ptr mem) - for { - x := v.Args[0] - if x.Op != OpAMD64MOVQload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWloadidx1 [off] {sym} ptr idx mem) - for { - x := v.Args[0] - if x.Op != OpAMD64MOVWloadidx1 { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - idx := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWloadidx2 [off] {sym} ptr idx mem) - for { - x := v.Args[0] - if x.Op != OpAMD64MOVWloadidx2 { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - idx := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (MOVWQZX (ANDLconst [c] x)) - // cond: - // result: (ANDLconst [c & 0xffff] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64ANDLconst { - break - } - c := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpAMD64ANDLconst) - v.AuxInt = c & 0xffff - v.AddArg(x) - return true - } - // match: (MOVWQZX x:(MOVWQZX _)) - // cond: - // result: x - for { - x := v.Args[0] - if x.Op != OpAMD64MOVWQZX { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MOVWQZX x:(MOVBQZX _)) - // cond: - // result: x - for { - x := v.Args[0] - if x.Op != OpAMD64MOVBQZX { 
- break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { - // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: x - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWstore { - break - } - off2 := v_1.AuxInt - sym2 := v_1.Aux - ptr2 := v_1.Args[0] - x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVWload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVWload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVWload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) - return true - } - // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ1 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVWloadidx1) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ2 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVWloadidx2) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVWloadidx1 [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVWloadidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVWload [off1+off2] 
{mergeSym(sym1,sym2)} base mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAL { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVWload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) - return true - } - // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) - // cond: is32Bit(off1+off2) - // result: (MOVWload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDLconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVWload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value) bool { - // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) - // cond: - // result: (MOVWloadidx2 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { - break - } - if v_1.AuxInt != 1 { - break - } - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVWloadidx2) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx1 [c] {sym} (SHLQconst [1] idx) ptr mem) - // cond: - // result: (MOVWloadidx2 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst { - break - } - if v_0.AuxInt != 1 { - break - } - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVWloadidx2) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: - // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVWloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) - // cond: - // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVWloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: - // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVWloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) - // cond: - // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVWloadidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - 
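// NOTE: the MOVWloadidx1 cases in this stretch spell out each operand order by hand;
// for example the pair
//	match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem)
//	match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem)
// both rewrite to (MOVWloadidx1 [c+d] {sym} ptr idx mem), folding the constant into
// the displacement of the scaled address ptr + 1*idx + (c+d).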
v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value) bool { - // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: - // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVWloadidx2) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: - // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVWloadidx2) - v.AuxInt = c + 2*d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { - // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) - // cond: - // result: (MOVWstore [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWQSX { - break - } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVWstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) - // cond: - // result: (MOVWstore [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVWQZX { - break - } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVWstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVWstore [off1+off2] {sym} ptr val mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVWstore) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) - // cond: validOff(off) - // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break - } - c := v_1.AuxInt - mem := v.Args[2] - if !(validOff(off)) { - break - } - v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = makeValAndOff(int64(int16(c)), off) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVWstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstore 
[off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ1 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ2 { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVWstoreidx2) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) - // cond: ptr.Op != OpSB - // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQ { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstore [i-2] {s} p w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRQconst { - break - } - if v_1.AuxInt != 16 { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpAMD64MOVWstore { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if w != x.Args[1] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVLstore) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstore [i-2] {s} p w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRQconst { - break - } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpAMD64MOVWstore { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - w0 := x.Args[1] - if w0.Op != OpAMD64SHRQconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVLstore) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) 
- for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAL { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVWstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) - // cond: is32Bit(off1+off2) - // result: (MOVWstore [off1+off2] {sym} ptr val mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDLconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1 + off2)) { - break - } - v.reset(OpAMD64MOVWstore) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { - // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) - for { - sc := v.AuxInt - s := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - off := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(ValAndOff(sc).canAdd(off)) { - break - } - v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) - for { - sc := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { - break - } - v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) - for { - x := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ1 { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVWstoreconstidx1) - v.AuxInt = ValAndOff(x).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) - // cond: canMergeSym(sym1, sym2) - // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) - for { - x := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAQ2 { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(OpAMD64MOVWstoreconstidx2) - v.AuxInt = ValAndOff(x).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) - // cond: - // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op 
!= OpAMD64ADDQ { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - v.reset(OpAMD64MOVWstoreconstidx1) - v.AuxInt = x - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) - // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) - // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) - for { - c := v.AuxInt - s := v.Aux - p := v.Args[0] - x := v.Args[1] - if x.Op != OpAMD64MOVWstoreconst { - break - } - a := x.AuxInt - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - mem := x.Args[1] - if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { - break - } - v.reset(OpAMD64MOVLstoreconst) - v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) - v.Aux = s - v.AddArg(p) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) - for { - sc := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64LEAL { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { - break - } - v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) - for { - sc := v.AuxInt - s := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDLconst { - break - } - off := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(ValAndOff(sc).canAdd(off)) { - break - } - v.reset(OpAMD64MOVWstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value) bool { - // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) - // cond: - // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { - break - } - if v_1.AuxInt != 1 { - break - } - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVWstoreconstidx2) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: - // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - c := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVWstoreconstidx1) - v.AuxInt = ValAndOff(x).add(c) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: - // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - c := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - 
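// NOTE on the MOVWstoreconst pair-merging case later in this function: two constant
// 16-bit stores at adjacent offsets collapse into one 32-bit constant store, with the
// lower-offset value in the low half. E.g. writing 0x1234 at [off] and 0x5678 at
// [off+2] becomes a single MOVLstoreconst of 0x56781234 at [off], which lays down the
// same little-endian bytes 34 12 78 56.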
v.reset(OpAMD64MOVWstoreconstidx1) - v.AuxInt = ValAndOff(x).add(c) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) - // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) - // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) - for { - c := v.AuxInt - s := v.Aux - p := v.Args[0] - i := v.Args[1] - x := v.Args[2] - if x.Op != OpAMD64MOVWstoreconstidx1 { - break - } - a := x.AuxInt - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if i != x.Args[1] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { - break - } - v.reset(OpAMD64MOVLstoreconstidx1) - v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) - v.Aux = s - v.AddArg(p) - v.AddArg(i) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value) bool { - b := v.Block - _ = b - // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: - // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - c := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpAMD64MOVWstoreconstidx2) - v.AuxInt = ValAndOff(x).add(c) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: - // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) - for { - x := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - c := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpAMD64MOVWstoreconstidx2) - v.AuxInt = ValAndOff(x).add(2 * c) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) - // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) - // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst [1] i) mem) - for { - c := v.AuxInt - s := v.Aux - p := v.Args[0] - i := v.Args[1] - x := v.Args[2] - if x.Op != OpAMD64MOVWstoreconstidx2 { - break - } - a := x.AuxInt - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if i != x.Args[1] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { - break - } - v.reset(OpAMD64MOVLstoreconstidx1) - v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) - v.Aux = s - v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) - v0.AuxInt = 1 - v0.AddArg(i) - v.AddArg(v0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { - // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) - // cond: - // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { - break - } - if v_1.AuxInt != 1 { - break - } - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - 
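// NOTE: this case and its mirror trade an explicit doubling of the index for the
// scaled addressing mode: ptr + 1*(idx<<1) + c is the same address as
// ptr + 2*idx + c, so the (SHLQconst [1] idx) folds away and the store becomes
// (MOVWstoreidx2 [c] {sym} ptr idx val mem).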
v.reset(OpAMD64MOVWstoreidx2) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [c] {sym} (SHLQconst [1] idx) ptr val mem) - // cond: - // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst { - break - } - if v_0.AuxInt != 1 { - break - } - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVWstoreidx2) - v.AuxInt = c - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: - // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [c] {sym} idx (ADDQconst [d] ptr) val mem) - // cond: - // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: - // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] idx) ptr val mem) - // cond: - // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVWstoreidx1) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - if v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVWstoreidx1 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: 
(MOVLstoreidx1 [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - if v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVWstoreidx1 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} idx p (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - if v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVWstoreidx1 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} idx p (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - if v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVWstoreidx1 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVWstoreidx1 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpAMD64SHRQconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} idx p w0:(SHRQconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] 
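// NOTE on the store-combining cases here: a 16-bit store of the high half of w at [i]
// next to a 16-bit store of w itself at [i-2] is replaced by one 32-bit store of w at
// [i-2]. On little-endian AMD64 the byte image is identical: with w = 0xAABBCCDD,
// storing 0xCCDD at i-2 and 0xAABB (w>>16) at i writes DD CC BB AA, exactly what
// (MOVLstoreidx1 [i-2] {s} p idx w mem) writes. The four p/idx orderings of the older
// store are matched one by one.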
- if v_2.Op != OpAMD64SHRQconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVWstoreidx1 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpAMD64SHRQconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} idx p (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVWstoreidx1 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpAMD64SHRQconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx1 [i] {s} idx p (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} idx p w0:(SHRQconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVWstoreidx1 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpAMD64SHRQconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool { - b := v.Block - _ = b - // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: - // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVWstoreidx2) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: - // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64ADDQconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpAMD64MOVWstoreidx2) - v.AuxInt = c + 2*d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return 
true - } - // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst [1] idx) w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - if v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVWstoreidx2 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) - v0.AuxInt = 1 - v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst [1] idx) w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpAMD64SHRQconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpAMD64MOVWstoreidx2 { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpAMD64SHRQconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpAMD64MOVLstoreidx1) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) - v0.AuxInt = 1 - v0.AddArg(idx) - v.AddArg(v0) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MULL(v *Value) bool { - // match: (MULL x (MOVLconst [c])) - // cond: - // result: (MULLconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64MULLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (MULL (MOVLconst [c]) x) - // cond: - // result: (MULLconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64MULLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { - // match: (MULLconst [c] (MULLconst [d] x)) - // cond: - // result: (MULLconst [int64(int32(c * d))] x) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MULLconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v.reset(OpAMD64MULLconst) - v.AuxInt = int64(int32(c * d)) - v.AddArg(x) - return true - } - // match: (MULLconst [c] (MOVLconst [d])) - // cond: - // result: (MOVLconst [int64(int32(c*d))]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVLconst) - v.AuxInt = int64(int32(c * d)) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool { - // match: (MULQ x (MOVQconst [c])) - // cond: is32Bit(c) - // result: (MULQconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break - } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - 
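// NOTE for the MULQconst cases that follow: LEAQ1/LEAQ2/LEAQ4/LEAQ8 a b compute
// a + 1*b, a + 2*b, a + 4*b and a + 8*b, so small constant multiplies become one or
// two LEAs instead of an IMUL. For example
//	(MULQconst [11] x) -> (LEAQ2 x (LEAQ4 x x))
// works because x + 2*(x + 4*x) = 11*x, and power-of-two constants reduce to
// (SHLQconst [log2(c)] x).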
v.reset(OpAMD64MULQconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (MULQ (MOVQconst [c]) x) - // cond: is32Bit(c) - // result: (MULQconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(is32Bit(c)) { - break - } - v.reset(OpAMD64MULQconst) - v.AuxInt = c - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { - b := v.Block - _ = b - // match: (MULQconst [c] (MULQconst [d] x)) - // cond: is32Bit(c*d) - // result: (MULQconst [c * d] x) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MULQconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - if !(is32Bit(c * d)) { - break - } - v.reset(OpAMD64MULQconst) - v.AuxInt = c * d - v.AddArg(x) - return true - } - // match: (MULQconst [-1] x) - // cond: - // result: (NEGQ x) - for { - if v.AuxInt != -1 { - break - } - x := v.Args[0] - v.reset(OpAMD64NEGQ) - v.AddArg(x) - return true - } - // match: (MULQconst [0] _) - // cond: - // result: (MOVQconst [0]) - for { - if v.AuxInt != 0 { - break - } - v.reset(OpAMD64MOVQconst) - v.AuxInt = 0 - return true - } - // match: (MULQconst [1] x) - // cond: - // result: x - for { - if v.AuxInt != 1 { - break - } - x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MULQconst [3] x) - // cond: - // result: (LEAQ2 x x) - for { - if v.AuxInt != 3 { - break - } - x := v.Args[0] - v.reset(OpAMD64LEAQ2) - v.AddArg(x) - v.AddArg(x) - return true - } - // match: (MULQconst [5] x) - // cond: - // result: (LEAQ4 x x) - for { - if v.AuxInt != 5 { - break - } - x := v.Args[0] - v.reset(OpAMD64LEAQ4) - v.AddArg(x) - v.AddArg(x) - return true - } - // match: (MULQconst [7] x) - // cond: - // result: (LEAQ8 (NEGQ x) x) - for { - if v.AuxInt != 7 { - break - } - x := v.Args[0] - v.reset(OpAMD64LEAQ8) - v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, v.Type) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULQconst [9] x) - // cond: - // result: (LEAQ8 x x) - for { - if v.AuxInt != 9 { - break - } - x := v.Args[0] - v.reset(OpAMD64LEAQ8) - v.AddArg(x) - v.AddArg(x) - return true - } - // match: (MULQconst [11] x) - // cond: - // result: (LEAQ2 x (LEAQ4 x x)) - for { - if v.AuxInt != 11 { - break - } - x := v.Args[0] - v.reset(OpAMD64LEAQ2) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULQconst [13] x) - // cond: - // result: (LEAQ4 x (LEAQ2 x x)) - for { - if v.AuxInt != 13 { - break - } - x := v.Args[0] - v.reset(OpAMD64LEAQ4) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULQconst [21] x) - // cond: - // result: (LEAQ4 x (LEAQ4 x x)) - for { - if v.AuxInt != 21 { - break - } - x := v.Args[0] - v.reset(OpAMD64LEAQ4) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULQconst [25] x) - // cond: - // result: (LEAQ8 x (LEAQ2 x x)) - for { - if v.AuxInt != 25 { - break - } - x := v.Args[0] - v.reset(OpAMD64LEAQ8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULQconst [37] x) - // cond: - // result: (LEAQ4 x (LEAQ8 x x)) - for { - if v.AuxInt != 37 { - break - } - x := v.Args[0] - v.reset(OpAMD64LEAQ4) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, 
OpAMD64LEAQ8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULQconst [41] x) - // cond: - // result: (LEAQ8 x (LEAQ4 x x)) - for { - if v.AuxInt != 41 { - break - } - x := v.Args[0] - v.reset(OpAMD64LEAQ8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULQconst [73] x) - // cond: - // result: (LEAQ8 x (LEAQ8 x x)) - for { - if v.AuxInt != 73 { - break - } - x := v.Args[0] - v.reset(OpAMD64LEAQ8) - v.AddArg(x) - v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULQconst [c] x) - // cond: isPowerOfTwo(c) - // result: (SHLQconst [log2(c)] x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpAMD64SHLQconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true - } - // match: (MULQconst [c] x) - // cond: isPowerOfTwo(c+1) && c >= 15 - // result: (SUBQ (SHLQconst [log2(c+1)] x) x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c+1) && c >= 15) { - break - } - v.reset(OpAMD64SUBQ) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULQconst [c] x) - // cond: isPowerOfTwo(c-1) && c >= 17 - // result: (LEAQ1 (SHLQconst [log2(c-1)] x) x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c-1) && c >= 17) { - break - } - v.reset(OpAMD64LEAQ1) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULQconst [c] x) - // cond: isPowerOfTwo(c-2) && c >= 34 - // result: (LEAQ2 (SHLQconst [log2(c-2)] x) x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c-2) && c >= 34) { - break - } - v.reset(OpAMD64LEAQ2) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v0.AuxInt = log2(c - 2) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULQconst [c] x) - // cond: isPowerOfTwo(c-4) && c >= 68 - // result: (LEAQ4 (SHLQconst [log2(c-4)] x) x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c-4) && c >= 68) { - break - } - v.reset(OpAMD64LEAQ4) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v0.AuxInt = log2(c - 4) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULQconst [c] x) - // cond: isPowerOfTwo(c-8) && c >= 136 - // result: (LEAQ8 (SHLQconst [log2(c-8)] x) x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c-8) && c >= 136) { - break - } - v.reset(OpAMD64LEAQ8) - v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v0.AuxInt = log2(c - 8) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULQconst [c] x) - // cond: c%3 == 0 && isPowerOfTwo(c/3) - // result: (SHLQconst [log2(c/3)] (LEAQ2 x x)) - for { - c := v.AuxInt - x := v.Args[0] - if !(c%3 == 0 && isPowerOfTwo(c/3)) { - break - } - v.reset(OpAMD64SHLQconst) - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULQconst [c] x) - // cond: c%5 == 0 && isPowerOfTwo(c/5) - // result: (SHLQconst [log2(c/5)] (LEAQ4 x x)) - for { - c := v.AuxInt - x := v.Args[0] - if !(c%5 == 0 && isPowerOfTwo(c/5)) { - break - } - v.reset(OpAMD64SHLQconst) - v.AuxInt = log2(c / 5) - v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // 
match: (MULQconst [c] x) - // cond: c%9 == 0 && isPowerOfTwo(c/9) - // result: (SHLQconst [log2(c/9)] (LEAQ8 x x)) - for { - c := v.AuxInt - x := v.Args[0] - if !(c%9 == 0 && isPowerOfTwo(c/9)) { - break - } - v.reset(OpAMD64SHLQconst) - v.AuxInt = log2(c / 9) - v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULQconst [c] (MOVQconst [d])) - // cond: - // result: (MOVQconst [c*d]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVQconst) - v.AuxInt = c * d - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool { - // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) - // result: (MULSDmem x [off] {sym} ptr mem) - for { - x := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVSDload { - break - } - off := l.AuxInt - sym := l.Aux - ptr := l.Args[0] - mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64MULSDmem) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) - // result: (MULSDmem x [off] {sym} ptr mem) - for { - l := v.Args[0] - if l.Op != OpAMD64MOVSDload { - break - } - off := l.AuxInt - sym := l.Aux - ptr := l.Args[0] - mem := l.Args[1] - x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64MULSDmem) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool { - // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) - // result: (MULSSmem x [off] {sym} ptr mem) - for { - x := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVSSload { - break - } - off := l.AuxInt - sym := l.Aux - ptr := l.Args[0] - mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64MULSSmem) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) - // result: (MULSSmem x [off] {sym} ptr mem) - for { - l := v.Args[0] - if l.Op != OpAMD64MOVSSload { - break - } - off := l.AuxInt - sym := l.Aux - ptr := l.Args[0] - mem := l.Args[1] - x := v.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64MULSSmem) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool { - // match: (NEGL (MOVLconst [c])) - // cond: - // result: (MOVLconst [int64(int32(-c))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64MOVLconst) - v.AuxInt = int64(int32(-c)) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool { - // match: (NEGQ (MOVQconst [c])) - // cond: - // result: (MOVQconst [-c]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64MOVQconst) - v.AuxInt = -c - return true - } - // match: (NEGQ (ADDQconst [c] (NEGQ x))) - // cond: c != -(1<<31) - // result: (ADDQconst [-c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64ADDQconst { - break - } - c := v_0.AuxInt - v_0_0 := 
v_0.Args[0] - if v_0_0.Op != OpAMD64NEGQ { - break - } - x := v_0_0.Args[0] - if !(c != -(1 << 31)) { - break - } - v.reset(OpAMD64ADDQconst) - v.AuxInt = -c - v.AddArg(x) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool { - // match: (NOTL (MOVLconst [c])) - // cond: - // result: (MOVLconst [^c]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64MOVLconst) - v.AuxInt = ^c - return true - } - return false -} -func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool { - // match: (NOTQ (MOVQconst [c])) - // cond: - // result: (MOVQconst [^c]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - v.reset(OpAMD64MOVQconst) - v.AuxInt = ^c - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { - b := v.Block - _ = b - types := &b.Func.Config.Types - _ = types - // match: (ORL x (MOVLconst [c])) - // cond: - // result: (ORLconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVLconst { - break - } - c := v_1.AuxInt - v.reset(OpAMD64ORLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (MOVLconst [c]) x) - // cond: - // result: (ORLconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpAMD64ORLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (SHLLconst x [c]) (SHRLconst x [d])) - // cond: d==32-c - // result: (ROLLconst x [c]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst { - break - } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRLconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(d == 32-c) { - break - } - v.reset(OpAMD64ROLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (SHRLconst x [d]) (SHLLconst x [c])) - // cond: d==32-c - // result: (ROLLconst x [c]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRLconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(d == 32-c) { - break - } - v.reset(OpAMD64ROLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (SHLLconst x [c]) (SHRWconst x [d])) - // cond: d==16-c && c < 16 && t.Size() == 2 - // result: (ROLWconst x [c]) - for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst { - break - } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRWconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(d == 16-c && c < 16 && t.Size() == 2) { - break - } - v.reset(OpAMD64ROLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (SHRWconst x [d]) (SHLLconst x [c])) - // cond: d==16-c && c < 16 && t.Size() == 2 - // result: (ROLWconst x [c]) - for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRWconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(d == 16-c && c < 16 && t.Size() == 2) { - break - } - v.reset(OpAMD64ROLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (SHLLconst x [c]) (SHRBconst x [d])) - // cond: d==8-c && c < 8 && t.Size() == 1 - // result: (ROLBconst x [c]) - for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLLconst { - break - 
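// NOTE on the rotate cases in ORL: (x << c) | (x >> (32-c)) is a 32-bit rotate left
// by c, which is why SHLLconst/SHRLconst pairs with d == 32-c collapse to
// (ROLLconst [c] x). The 16- and 8-bit forms use SHRWconst/SHRBconst with d == 16-c
// and d == 8-c, guarded by t.Size() so the rewrite only fires on values of that width.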
} - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRBconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(d == 8-c && c < 8 && t.Size() == 1) { - break - } - v.reset(OpAMD64ROLBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL (SHRBconst x [d]) (SHLLconst x [c])) - // cond: d==8-c && c < 8 && t.Size() == 1 - // result: (ROLBconst x [c]) - for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRBconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLLconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(d == 8-c && c < 8 && t.Size() == 1) { - break - } - v.reset(OpAMD64ROLBconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORL x x) - // cond: - // result: x - for { - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := 
x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWload [i0] {s} p mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && 
x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWload [i0] {s} p mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWload [i0] {s} p mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWload [i0] {s} p mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - 
break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [8] 
x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) 
x0:(MOVBloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] 
x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL x0:(MOVWloadidx1 [i0] {s} idx p mem) 
sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [16] 
x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL sh:(SHLLconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 
&& or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && 
clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - 
v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := 
b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, 
OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - 
v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return 
true - } - // match: (ORL or:(ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL y s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p 
mem))) s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWload [i0] {s} p mem)) - for { - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWload [i0] {s} p mem)) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != 
x1.Args[1] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLload [i0] {s} p mem)) - for { - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLload [i0] {s} p mem)) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p 
mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) - for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) - for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) - // cond: i1 == i0+1 && j1 
== j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && 
clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) 
- v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, 
x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - 
break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { - break - } - x0 := 
r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && 
sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) 
- v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLLconst { - break - } - if sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - 
v0.AddArg(y) - return true - } - // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - 
v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := 
b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, 
OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && 
clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && 
x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break 
- } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if 
x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL or:(ORL y s1:(SHLLconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLLconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORL (SHLLconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORL { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLLconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLLconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORL x l:(MOVLload [off] {sym} ptr mem)) - // cond: canMergeLoad(v, l, x) && clobber(l) - // result: (ORLmem x [off] {sym} ptr mem) - for { - x := v.Args[0] - l := v.Args[1] - if l.Op != OpAMD64MOVLload { - break - } - off := l.AuxInt - sym := l.Aux - ptr := l.Args[0] - mem := l.Args[1] - if !(canMergeLoad(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64ORLmem) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ORL l:(MOVLload [off] {sym} ptr mem) x) - // cond: canMergeLoad(v, l, x) && clobber(l) - // result: (ORLmem x [off] {sym} ptr mem) - for { - l := v.Args[0] - if l.Op != OpAMD64MOVLload { - break - } - off := l.AuxInt - sym := l.Aux - ptr := l.Args[0] - mem := l.Args[1] - x := v.Args[1] - if 
!(canMergeLoad(v, l, x) && clobber(l)) { - break - } - v.reset(OpAMD64ORLmem) - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool { - // match: (ORLconst [c] x) - // cond: int32(c)==0 - // result: x - for { - c := v.AuxInt - x := v.Args[0] - if !(int32(c) == 0) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ORLconst [c] _) - // cond: int32(c)==-1 - // result: (MOVLconst [-1]) - for { - c := v.AuxInt - if !(int32(c) == -1) { - break - } - v.reset(OpAMD64MOVLconst) - v.AuxInt = -1 - return true - } - // match: (ORLconst [c] (MOVLconst [d])) - // cond: - // result: (MOVLconst [c|d]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVLconst { - break - } - d := v_0.AuxInt - v.reset(OpAMD64MOVLconst) - v.AuxInt = c | d - return true - } - return false -} -func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { - b := v.Block - _ = b - types := &b.Func.Config.Types - _ = types - // match: (ORQ x (MOVQconst [c])) - // cond: is32Bit(c) - // result: (ORQconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break - } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpAMD64ORQconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORQ (MOVQconst [c]) x) - // cond: is32Bit(c) - // result: (ORQconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64MOVQconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(is32Bit(c)) { - break - } - v.reset(OpAMD64ORQconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORQ (SHLQconst x [c]) (SHRQconst x [d])) - // cond: d==64-c - // result: (ROLQconst x [c]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHLQconst { - break - } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHRQconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(d == 64-c) { - break - } - v.reset(OpAMD64ROLQconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORQ (SHRQconst x [d]) (SHLQconst x [c])) - // cond: d==64-c - // result: (ROLQconst x [c]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64SHRQconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(d == 64-c) { - break - } - v.reset(OpAMD64ROLQconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORQ x x) - // cond: - // result: x - for { - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) 
&& clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)) x0:(MOVBload [i0] {s} p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)) x0:(MOVWload [i0] {s} p mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] 
x1:(MOVLload [i1] {s} p mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVLload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)) x0:(MOVLload [i0] {s} p mem)) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVLload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWload [i0] {s} p mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := 
b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWload [i0] {s} p mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWload [i0] {s} p mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) - // 
cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWload [i0] {s} p mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y)) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLload [i0] {s} p mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLload [i0] {s} 
p mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLload [i0] {s} p mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))) s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLload [i0] {s} p mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := 
x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVBloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) 
(MOVWloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVBloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) 
(MOVWloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} p idx mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [8] x1:(MOVBloadidx1 [i1] {s} idx p mem)) x0:(MOVBloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) 
(MOVLloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVWloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVWloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: 
@mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} p idx mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && 
clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [16] x1:(MOVWloadidx1 [i1] {s} idx p mem)) x0:(MOVWloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVLloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 16 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && 
clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVLloadidx1 [i0] {s} p idx mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ x0:(MOVLloadidx1 [i0] {s} idx p mem) sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) - for { - x0 := v.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && 
clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} p idx mem)) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} p idx mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ sh:(SHLQconst [32] x1:(MOVLloadidx1 [i1] {s} idx p mem)) x0:(MOVLloadidx1 [i0] {s} idx p mem)) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && 
mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVQloadidx1 [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 32 { - break - } - x1 := sh.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != 
OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != 
OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { 
- break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if 
mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses 
== 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, 
x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && 
clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVWloadidx1 [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, 
OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y)) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y)) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, 
OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y)) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, 
OpAMD64MOVLloadidx1, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - 
v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) - for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + c := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVQstoreconstidx1) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem)) or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) + // match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) + // cond: + // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { - s1 := v.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + x := v.AuxInt + sym := 
v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + c := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVQstoreconstidx1) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) + return false +} +func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value) bool { + // match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) + // cond: + // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + x := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + c := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVQstoreconstidx8) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) + // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) + // cond: + // result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) for { - 
or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + x := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + c := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVQstoreconstidx8) + v.AuxInt = ValAndOff(x).add(8 * c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) + return false +} +func rewriteValueAMD64_OpAMD64MOVQstoreidx1(v *Value) bool { + // match: (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) + // cond: + // result: (MOVQstoreidx8 [c] {sym} ptr idx val mem) for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLQconst { break } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if v_1.AuxInt != 3 { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - 
v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVQstoreidx8) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) + // match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) + // cond: + // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVQstoreidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) + // match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) + // cond: + // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx 
!= x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVQstoreidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (ORQ or:(ORQ s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem)) y) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) + return false +} +func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value) bool { + // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) + // cond: + // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - s0 := or.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVQstoreidx8) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} p idx mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && 
or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) + // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) + // cond: + // result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVQstoreidx8) + v.AuxInt = c + 8*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (ORQ or:(ORQ y s0:(SHLQconst [j0] x0:(MOVWloadidx1 [i0] {s} idx p mem))) s1:(SHLQconst [j1] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j0] (MOVLloadidx1 [i0] {s} p idx mem)) y) + return false +} +func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool { + // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVSDload [off1+off2] {sym} ptr mem) for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if p != x1.Args[1] { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1 + off2)) { break } - if mem != x1.Args[2] { + v.reset(OpAMD64MOVSDload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVSDload [off1] {sym1} (LEAQ 
[off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpAMD64MOVSDload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) return true } - // match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWload [i0] {s} p mem)) + // match: (MOVSDload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVSDloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) for { - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBload { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ1 { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if sh.AuxInt != 8 { + v.reset(OpAMD64MOVSDloadidx1) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVSDloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ8 { break } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBload { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(OpAMD64MOVSDloadidx8) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVSDload [off] {sym} (ADDQ ptr idx) mem) + // cond: ptr.Op != OpSB + // result: (MOVSDloadidx1 [off] {sym} ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQ { break } - if p != x0.Args[0] { + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(ptr.Op != OpSB) { break } - if mem != x0.Args[1] { + v.reset(OpAMD64MOVSDloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSDloadidx1(v *Value) bool { + // 
match: (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) + // cond: + // result: (MOVSDloadidx8 [c] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLQconst { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + if v_1.AuxInt != 3 { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVSDloadidx8) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (ORQ sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)) x1:(MOVBload [i1] {s} p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWload [i0] {s} p mem)) + // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) + // cond: + // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if sh.AuxInt != 8 { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVSDloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) + // cond: + // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBload { + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVSDloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value) bool { + // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) + // cond: + // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBload { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVSDloadidx8) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) + // cond: + // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - i1 := x1.AuxInt - if x1.Aux != s { + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVSDloadidx8) + v.AuxInt = c + 8*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool { + // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVSDstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + 
v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if p != x1.Args[0] { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { break } - if mem != x1.Args[1] { + v.reset(OpAMD64MOVSDstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) + v.reset(OpAMD64MOVSDstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLload [i0] {s} p mem)) + // match: (MOVSDstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVSDstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) for { - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ1 { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWload { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { + v.reset(OpAMD64MOVSDstoreidx1) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVSDstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ8 { break } - if sh.AuxInt != 16 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { + v.reset(OpAMD64MOVSDstoreidx8) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSDstore [off] {sym} (ADDQ ptr idx) val mem) + // cond: ptr.Op != OpSB + // 
result: (MOVSDstoreidx1 [off] {sym} ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQ { break } - if r0.AuxInt != 8 { + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(ptr.Op != OpSB) { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWload { + v.reset(OpAMD64MOVSDstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSDstoreidx1(v *Value) bool { + // match: (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) + // cond: + // result: (MOVSDstoreidx8 [c] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLQconst { break } - i0 := x0.AuxInt - if x0.Aux != s { + if v_1.AuxInt != 3 { break } - if p != x0.Args[0] { + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVSDstoreidx8) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) + // cond: + // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if mem != x0.Args[1] { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVSDstoreidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) + // cond: + // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVSDstoreidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value) bool { + // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) + // cond: + // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVSDstoreidx8) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) + // cond: + // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { + break + } + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVSDstoreidx8) + v.AuxInt = c + 8*d + v.Aux = sym + v.AddArg(ptr) + 
v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLload [i0] {s} p mem)) + return false +} +func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool { + // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVSSload [off1+off2] {sym} ptr mem) for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWload { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1 + off2)) { break } - i1 := x1.AuxInt - if x1.Aux != s { + v.reset(OpAMD64MOVSSload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - if p != x1.Args[0] { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if mem != x1.Args[1] { + v.reset(OpAMD64MOVSSload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVSSload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVSSloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ1 { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) + v.reset(OpAMD64MOVSSloadidx1) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem)))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: 
@mergePoint(b,x0,x1) (BSWAPQ (MOVQload [i0] {s} p mem)) + // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVSSloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) for { - r1 := v.Args[0] - if r1.Op != OpAMD64BSWAPL { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ4 { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLload { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { + v.reset(OpAMD64MOVSSloadidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVSSload [off] {sym} (ADDQ ptr idx) mem) + // cond: ptr.Op != OpSB + // result: (MOVSSloadidx1 [off] {sym} ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQ { break } - if sh.AuxInt != 32 { + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(ptr.Op != OpSB) { break } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { + v.reset(OpAMD64MOVSSloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSSloadidx1(v *Value) bool { + // match: (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) + // cond: + // result: (MOVSSloadidx4 [c] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLQconst { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLload { + if v_1.AuxInt != 2 { break } - i0 := x0.AuxInt - if x0.Aux != s { + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVSSloadidx4) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) + // cond: + // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if p != x0.Args[0] { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVSSloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) + // cond: + // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - if mem != x0.Args[1] { + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVSSloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value) bool { + // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) + // cond: + // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] 
+ v.reset(OpAMD64MOVSSloadidx4) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) + // cond: + // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVSSloadidx4) + v.AuxInt = c + 4*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))) r1:(BSWAPL x1:(MOVLload [i1] {s} p mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQload [i0] {s} p mem)) + return false +} +func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool { + // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVSSstore [off1+off2] {sym} ptr val mem) for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if sh.AuxInt != 32 { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { break } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { + v.reset(OpAMD64MOVSSstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLload { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - r1 := v.Args[1] - if r1.Op != OpAMD64BSWAPL { + v.reset(OpAMD64MOVSSstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSSstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVSSstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ1 { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLload { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - i1 := x1.AuxInt - if x1.Aux != s { + v.reset(OpAMD64MOVSSstoreidx1) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) + // cond: 
is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVSSstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ4 { break } - if p != x1.Args[0] { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if mem != x1.Args[1] { + v.reset(OpAMD64MOVSSstoreidx4) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSSstore [off] {sym} (ADDQ ptr idx) val mem) + // cond: ptr.Op != OpSB + // result: (MOVSSstoreidx1 [off] {sym} ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQ { break } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(ptr.Op != OpSB) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) + v.reset(OpAMD64MOVSSstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) + return false +} +func rewriteValueAMD64_OpAMD64MOVSSstoreidx1(v *Value) bool { + // match: (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) + // cond: + // result: (MOVSSstoreidx4 [c] {sym} ptr idx val mem) for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLQconst { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { + if v_1.AuxInt != 2 { break } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVSSstoreidx4) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) + // cond: + // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVSSstoreidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) + // cond: + // 
result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - i1 := x1.AuxInt - if x1.Aux != s { + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVSSstoreidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value) bool { + // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) + // cond: + // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if p != x1.Args[0] { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVSSstoreidx4) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) + // cond: + // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - if mem != x1.Args[1] { + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVSSstoreidx4) + v.AuxInt = c + 4*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool { + b := v.Block + _ = b + // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWQSXload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpAMD64MOVWload { break } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + b = x.Block + v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) return true } - // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) + // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWQSXload [off] {sym} ptr mem) for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break 
- } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { + x := v.Args[0] + if x.Op != OpAMD64MOVLload { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + b = x.Block + v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) return true } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) + // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWQSXload [off] {sym} ptr mem) for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { + x := v.Args[0] + if x.Op != OpAMD64MOVQload { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + b = x.Block + v0 := b.NewValue0(v.Pos, OpAMD64MOVWQSXload, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) return true } - // match: 
(ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem))) s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWload [i0] {s} p mem))) y) + // match: (MOVWQSX (ANDLconst [c] x)) + // cond: c & 0x8000 == 0 + // result: (ANDLconst [c & 0x7fff] x) for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { + v_0 := v.Args[0] + if v_0.Op != OpAMD64ANDLconst { break } - if mem != x0.Args[1] { + c := v_0.AuxInt + x := v_0.Args[0] + if !(c&0x8000 == 0) { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + v.reset(OpAMD64ANDLconst) + v.AuxInt = c & 0x7fff + v.AddArg(x) + return true + } + // match: (MOVWQSX x:(MOVWQSX _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpAMD64MOVWQSX { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.Type = x.Type + v.AddArg(x) return true } - // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y)) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLload [i0] {s} p mem))) y) + // match: (MOVWQSX x:(MOVBQSX _)) + // cond: + // result: x for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWload { + x := v.Args[0] + if x.Op != OpAMD64MOVBQSX { break } - i1 := x1.AuxInt - if x1.Aux != s { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false 
+} +func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool { + // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - if p != x1.Args[0] { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if mem != x1.Args[1] { + v.reset(OpAMD64MOVWQSXload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool { + b := v.Block + _ = b + // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpAMD64MOVWload { break } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + b = x.Block + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) return true } - // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLload [i0] {s} p mem))) y) + // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWload [off] {sym} ptr mem) for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { + x := v.Args[0] + if x.Op != OpAMD64MOVLload { break } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 
&& x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + b = x.Block + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) return true } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLload [i0] {s} p mem))) y) + // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWload [off] {sym} ptr mem) for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { + x := v.Args[0] + if x.Op != OpAMD64MOVQload { break } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + b = x.Block + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) return true } - // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))) 
- // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLload [i0] {s} p mem))) y) + // match: (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWloadidx1 [off] {sym} ptr idx mem) for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { + x := v.Args[0] + if x.Op != OpAMD64MOVWloadidx1 { break } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + b = x.Block + v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) return true } - // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) + // match: (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWloadidx2 [off] {sym} ptr idx mem) for { - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { + x := v.Args[0] + if x.Op != OpAMD64MOVWloadidx2 { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + 
off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) + b = x.Block + v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, v.Type) v.reset(OpCopy) v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) return true } - // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) + // match: (MOVWQZX (ANDLconst [c] x)) + // cond: + // result: (ANDLconst [c & 0xffff] x) for { - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { + v_0 := v.Args[0] + if v_0.Op != OpAMD64ANDLconst { break } - if p != x0.Args[0] { + c := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpAMD64ANDLconst) + v.AuxInt = c & 0xffff + v.AddArg(x) + return true + } + // match: (MOVWQZX x:(MOVWQZX _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpAMD64MOVWQZX { break } - if idx != x0.Args[1] { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWQZX x:(MOVBQZX _)) + // cond: + // result: x + for { + x := v.Args[0] + if x.Op != OpAMD64MOVBQZX { break } - if mem != x0.Args[2] { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool { + // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVWstore { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + off2 := v_1.AuxInt + sym2 := v_1.Aux + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v.Type = x.Type + v.AddArg(x) return true } - // match: (ORQ x1:(MOVBloadidx1 [i1] {s} p idx mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) + // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVWload [off1+off2] {sym} ptr mem) for { - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := 
x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if sh.AuxInt != 8 { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1 + off2)) { break } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + v.reset(OpAMD64MOVWload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - i0 := x0.AuxInt - if x0.Aux != s { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if idx != x0.Args[0] { + v.reset(OpAMD64MOVWload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVWload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ1 { break } - if p != x0.Args[1] { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if mem != x0.Args[2] { + v.reset(OpAMD64MOVWloadidx1) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWloadidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ2 { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v.reset(OpAMD64MOVWloadidx2) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (ORQ x1:(MOVBloadidx1 [i1] {s} idx p mem) sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) + // match: (MOVWload [off] {sym} (ADDQ ptr idx) mem) + // cond: ptr.Op != OpSB + // result: (MOVWloadidx1 [off] {sym} ptr idx mem) for { - x1 := v.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQ { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := 
x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(ptr.Op != OpSB) { break } - if sh.AuxInt != 8 { + v.reset(OpAMD64MOVWloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAL { break } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(OpAMD64MOVWload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem) + // cond: is32Bit(off1+off2) + // result: (MOVWload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDLconst { break } - if idx != x0.Args[0] { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1 + off2)) { break } - if p != x0.Args[1] { + v.reset(OpAMD64MOVWload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWloadidx1(v *Value) bool { + // match: (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) + // cond: + // result: (MOVWloadidx2 [c] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLQconst { break } - if mem != x0.Args[2] { + if v_1.AuxInt != 1 { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVWloadidx2) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) + // cond: + // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVWloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) + // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) + // cond: + // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - if sh.AuxInt != 8 { + d := v_1.AuxInt + idx := 
v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVWloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value) bool { + // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) + // cond: + // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVWloadidx2) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) + // cond: + // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBloadidx1 { + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVWloadidx2) + v.AuxInt = c + 2*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool { + // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) + // cond: + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVWQSX { break } - i1 := x1.AuxInt - if x1.Aux != s { + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVWstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) + // cond: + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVWQZX { break } - if p != x1.Args[0] { + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVWstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVWstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if idx != x1.Args[1] { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { break } - if mem != x1.Args[2] { + v.reset(OpAMD64MOVWstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) + // cond: validOff(off) + // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVLconst { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + c := v_1.AuxInt + mem := v.Args[2] + if !(validOff(off)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - 
v1.AddArg(mem) - v0.AddArg(v1) + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = makeValAndOff(int64(int16(c)), off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) + // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 8 { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBloadidx1 { + v.reset(OpAMD64MOVWstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ1 { break } - i1 := x1.AuxInt - if x1.Aux != s { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if p != x1.Args[0] { + v.reset(OpAMD64MOVWstoreidx1) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWstoreidx2 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ2 { break } - if idx != x1.Args[1] { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if mem != x1.Args[2] { + v.reset(OpAMD64MOVWstoreidx2) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off] {sym} (ADDQ ptr idx) val mem) + // cond: ptr.Op != OpSB + // result: (MOVWstoreidx1 [off] {sym} ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQ { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(ptr.Op != OpSB) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v1.AuxInt = 
i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v.reset(OpAMD64MOVWstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} p idx mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) + // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstore [i-2] {s} p w mem) for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 8 { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHRQconst { break } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + if v_1.AuxInt != 16 { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBloadidx1 { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpAMD64MOVWstore { break } - i1 := x1.AuxInt - if x1.Aux != s { + if x.AuxInt != i-2 { break } - if idx != x1.Args[0] { + if x.Aux != s { break } - if p != x1.Args[1] { + if p != x.Args[0] { break } - if mem != x1.Args[2] { + if w != x.Args[1] { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v.reset(OpAMD64MOVLstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (ORQ sh:(SHLQconst [8] x0:(MOVBloadidx1 [i0] {s} idx p mem)) x1:(MOVBloadidx1 [i1] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem)) + // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstore [i-2] {s} p w0 mem) for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHRQconst { break } - if sh.AuxInt != 8 { + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpAMD64MOVWstore { break } - x0 := sh.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + if x.AuxInt != i-2 { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpAMD64MOVBloadidx1 { + if x.Aux != s { break } - i1 := x1.AuxInt - if x1.Aux != s { + if p != x.Args[0] { break } - if idx != x1.Args[0] { + w0 := x.Args[1] + if w0.Op != OpAMD64SHRQconst { break } - if p != x1.Args[1] { + if w0.AuxInt != j-16 { break } - if mem != x1.Args[2] { + if w != w0.Args[0] { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + mem := 
x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = 8 - v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v.reset(OpAMD64MOVLstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) + // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAL { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { + v.reset(OpAMD64MOVWstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVWstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDLconst { break } - if sh.AuxInt != 16 { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1 + off2)) { break } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { + v.reset(OpAMD64MOVWstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool { + // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + for { + sc := v.AuxInt + s := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if r0.AuxInt != 8 { + off := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(sc).canAdd(off)) { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) + // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ { break } - i0 := x0.AuxInt - if x0.Aux != s { + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } - if p != x0.Args[0] { + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt 
= ValAndOff(sc).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVWstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + x := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ1 { break } - if idx != x0.Args[1] { + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { break } - if mem != x0.Args[2] { + v.reset(OpAMD64MOVWstoreconstidx1) + v.AuxInt = ValAndOff(x).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + x := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ2 { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v.reset(OpAMD64MOVWstoreconstidx2) + v.AuxInt = ValAndOff(x).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) + // match: (MOVWstoreconst [x] {sym} (ADDQ ptr idx) mem) + // cond: + // result: (MOVWstoreconstidx1 [x] {sym} ptr idx mem) for { - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst { + x := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQ { break } - if r1.AuxInt != 8 { + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + v.reset(OpAMD64MOVWstoreconstidx1) + v.AuxInt = x + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) + // result: (MOVLstoreconst [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p mem) + for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64MOVWstoreconst { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + a := x.AuxInt + if x.Aux != s { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { + if p != x.Args[0] { break } - if sh.AuxInt != 16 { + mem := x.Args[1] + if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { 
break } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) + // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAL { break } - if r0.AuxInt != 8 { + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + for { + sc := v.AuxInt + s := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDLconst { break } - i0 := x0.AuxInt - if x0.Aux != s { + off := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(sc).canAdd(off)) { break } - if p != x0.Args[0] { + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1(v *Value) bool { + // match: (MOVWstoreconstidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) + // cond: + // result: (MOVWstoreconstidx2 [c] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLQconst { break } - if idx != x0.Args[1] { + if v_1.AuxInt != 1 { break } - if mem != x0.Args[2] { + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVWstoreconstidx2) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) + // cond: + // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + c := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVWstoreconstidx1) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) + // cond: + // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + c := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVWstoreconstidx1) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) return true } - // match: (ORQ 
r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) + // match: (MOVWstoreconstidx1 [c] {s} p i x:(MOVWstoreconstidx1 [a] {s} p i mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) + // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p i mem) for { - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + i := v.Args[1] + x := v.Args[2] + if x.Op != OpAMD64MOVWstoreconstidx1 { + break + } + a := x.AuxInt + if x.Aux != s { break } - if r1.AuxInt != 8 { + if p != x.Args[0] { + break + } + if i != x.Args[1] { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + mem := x.Args[2] + if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { + v.reset(OpAMD64MOVLstoreconstidx1) + v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v.AddArg(i) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value) bool { + b := v.Block + _ = b + // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) + // cond: + // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if sh.AuxInt != 16 { + c := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVWstoreconstidx2) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) + // cond: + // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { + c := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVWstoreconstidx2) + v.AuxInt = ValAndOff(x).add(2 * c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconstidx2 [c] {s} p i x:(MOVWstoreconstidx2 [a] {s} p i mem)) + // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) + // result: (MOVLstoreconstidx1 [makeValAndOff(ValAndOff(a).Val()&0xffff | ValAndOff(c).Val()<<16, ValAndOff(a).Off())] {s} p (SHLQconst [1] i) mem) + for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + i := v.Args[1] + x := v.Args[2] + if x.Op != OpAMD64MOVWstoreconstidx2 { break } - if r0.AuxInt != 8 { + a := x.AuxInt + if x.Aux != s { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + if p != x.Args[0] { break } - i0 := x0.AuxInt - if x0.Aux != s { + if i != x.Args[1] { break } - if idx != x0.Args[0] { + mem := x.Args[2] + if !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { break } - if p != x0.Args[1] { + 
v.reset(OpAMD64MOVLstoreconstidx1) + v.AuxInt = makeValAndOff(ValAndOff(a).Val()&0xffff|ValAndOff(c).Val()<<16, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, i.Type) + v0.AuxInt = 1 + v0.AddArg(i) + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWstoreidx1(v *Value) bool { + // match: (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) + // cond: + // result: (MOVWstoreidx2 [c] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLQconst { break } - if mem != x0.Args[2] { + if v_1.AuxInt != 1 { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVWstoreidx2) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) + // cond: + // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVWstoreidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (ORQ r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) + // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) + // cond: + // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) for { - r1 := v.Args[0] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - if sh.AuxInt != 16 { + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVWstoreidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx1 [i-2] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstoreidx1 [i-2] {s} p idx w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { break } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { + if v_2.AuxInt != 16 { break } - if r0.AuxInt != 8 { + w := v_2.Args[0] + x := v.Args[3] + if x.Op 
!= OpAMD64MOVWstoreidx1 { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + if x.AuxInt != i-2 { break } - i0 := x0.AuxInt - if x0.Aux != s { + if x.Aux != s { break } - if idx != x0.Args[0] { + if p != x.Args[0] { break } - if p != x0.Args[1] { + if idx != x.Args[1] { break } - if mem != x0.Args[2] { + if w != x.Args[2] { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v.reset(OpAMD64MOVLstoreidx1) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) + // match: (MOVWstoreidx1 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx1 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstoreidx1 [i-2] {s} p idx w0 mem) for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVWstoreidx1 { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst { + if x.AuxInt != i-2 { break } - if r1.AuxInt != 8 { + if x.Aux != s { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + if p != x.Args[0] { break } - i1 := x1.AuxInt - if x1.Aux != s { + if idx != x.Args[1] { break } - if p != x1.Args[0] { + w0 := x.Args[2] + if w0.Op != OpAMD64SHRQconst { break } - if idx != x1.Args[1] { + if w0.AuxInt != j-16 { break } - if mem != x1.Args[2] { + if w != w0.Args[0] { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v.reset(OpAMD64MOVLstoreidx1) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) - // cond: i1 == 
i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) + return false +} +func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value) bool { + b := v.Block + _ = b + // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) + // cond: + // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if r0.AuxInt != 8 { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVWstoreidx2) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) + // cond: + // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVWstoreidx2) + v.AuxInt = c + 2*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [16] w) x:(MOVWstoreidx2 [i-2] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst [1] idx) w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst { + if v_2.AuxInt != 16 { break } - if r1.AuxInt != 8 { + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVWstoreidx2 { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + if x.AuxInt != i-2 { break } - i1 := x1.AuxInt - if x1.Aux != s { + if x.Aux != s { break } - if p != x1.Args[0] { + if p != x.Args[0] { break } - if idx != x1.Args[1] { + if idx != x.Args[1] { break } - if mem != x1.Args[2] { + if w != x.Args[2] { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) + v.reset(OpAMD64MOVLstoreidx1) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) + v0.AuxInt = 1 + v0.AddArg(idx) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && 
clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) + // match: (MOVWstoreidx2 [i] {s} p idx (SHRQconst [j] w) x:(MOVWstoreidx2 [i-2] {s} p idx w0:(SHRQconst [j-16] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVLstoreidx1 [i-2] {s} p (SHLQconst [1] idx) w0 mem) for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 16 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64SHRQconst { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpAMD64MOVWstoreidx2 { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst { + if x.AuxInt != i-2 { break } - if r1.AuxInt != 8 { + if x.Aux != s { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + if p != x.Args[0] { break } - i1 := x1.AuxInt - if x1.Aux != s { + if idx != x.Args[1] { break } - if idx != x1.Args[0] { + w0 := x.Args[2] + if w0.Op != OpAMD64SHRQconst { break } - if p != x1.Args[1] { + if w0.AuxInt != j-16 { break } - if mem != x1.Args[2] { + if w != w0.Args[0] { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) - v.reset(OpCopy) + v.reset(OpAMD64MOVLstoreidx1) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, idx.Type) + v0.AuxInt = 1 + v0.AddArg(idx) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (ORQ sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem)) + return false +} +func rewriteValueAMD64_OpAMD64MULL(v *Value) bool { + // match: (MULL x (MOVLconst [c])) + // cond: + // result: (MULLconst [c] x) for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVLconst { break } - if sh.AuxInt != 16 { + c := v_1.AuxInt + v.reset(OpAMD64MULLconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MULL (MOVLconst [c]) x) + // cond: + // result: (MULLconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVLconst { break } - r0 := sh.Args[0] - if r0.Op != OpAMD64ROLWconst { + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpAMD64MULLconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool { + // match: (MULLconst [c] (MULLconst [d] x)) + // cond: + // result: (MULLconst [int64(int32(c * d))] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpAMD64MULLconst { break } - if 
r0.AuxInt != 8 { + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpAMD64MULLconst) + v.AuxInt = int64(int32(c * d)) + v.AddArg(x) + return true + } + // match: (MULLconst [c] (MOVLconst [d])) + // cond: + // result: (MOVLconst [int64(int32(c*d))]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVLconst { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + d := v_0.AuxInt + v.reset(OpAMD64MOVLconst) + v.AuxInt = int64(int32(c * d)) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool { + // match: (MULQ x (MOVQconst [c])) + // cond: is32Bit(c) + // result: (MULQconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVQconst { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - r1 := v.Args[1] - if r1.Op != OpAMD64ROLWconst { + c := v_1.AuxInt + if !(is32Bit(c)) { break } - if r1.AuxInt != 8 { + v.reset(OpAMD64MULQconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MULQ (MOVQconst [c]) x) + // cond: is32Bit(c) + // result: (MULQconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVQconst { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + c := v_0.AuxInt + x := v.Args[1] + if !(is32Bit(c)) { break } - i1 := x1.AuxInt - if x1.Aux != s { + v.reset(OpAMD64MULQconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool { + b := v.Block + _ = b + // match: (MULQconst [c] (MULQconst [d] x)) + // cond: is32Bit(c*d) + // result: (MULQconst [c * d] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpAMD64MULQconst { break } - if idx != x1.Args[0] { + d := v_0.AuxInt + x := v_0.Args[0] + if !(is32Bit(c * d)) { break } - if p != x1.Args[1] { + v.reset(OpAMD64MULQconst) + v.AuxInt = c * d + v.AddArg(x) + return true + } + // match: (MULQconst [-1] x) + // cond: + // result: (NEGQ x) + for { + if v.AuxInt != -1 { break } - if mem != x1.Args[2] { + x := v.Args[0] + v.reset(OpAMD64NEGQ) + v.AddArg(x) + return true + } + // match: (MULQconst [0] _) + // cond: + // result: (MOVQconst [0]) + for { + if v.AuxInt != 0 { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + v.reset(OpAMD64MOVQconst) + v.AuxInt = 0 + return true + } + // match: (MULQconst [1] x) + // cond: + // result: x + for { + if v.AuxInt != 1 { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) + x := v.Args[0] v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v.Type = x.Type + v.AddArg(x) return true } - // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQloadidx1 [i0] {s} p idx mem)) + // match: (MULQconst [3] x) + // cond: + // result: (LEAQ2 x x) for { - r1 := v.Args[0] - if r1.Op != OpAMD64BSWAPL { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := 
x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 32 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { + if v.AuxInt != 3 { break } - if mem != x0.Args[2] { + x := v.Args[0] + v.reset(OpAMD64LEAQ2) + v.AddArg(x) + v.AddArg(x) + return true + } + // match: (MULQconst [5] x) + // cond: + // result: (LEAQ4 x x) + for { + if v.AuxInt != 5 { break } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + x := v.Args[0] + v.reset(OpAMD64LEAQ4) + v.AddArg(x) + v.AddArg(x) + return true + } + // match: (MULQconst [7] x) + // cond: + // result: (LEAQ8 (NEGQ x) x) + for { + if v.AuxInt != 7 { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) + x := v.Args[0] + v.reset(OpAMD64LEAQ8) + v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, v.Type) + v0.AddArg(x) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v.AddArg(x) return true } - // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQloadidx1 [i0] {s} p idx mem)) + // match: (MULQconst [9] x) + // cond: + // result: (LEAQ8 x x) for { - r1 := v.Args[0] - if r1.Op != OpAMD64BSWAPL { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { - break - } - if sh.AuxInt != 32 { - break - } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { + if v.AuxInt != 9 { break } - i0 := x0.AuxInt - if x0.Aux != s { + x := v.Args[0] + v.reset(OpAMD64LEAQ8) + v.AddArg(x) + v.AddArg(x) + return true + } + // match: (MULQconst [11] x) + // cond: + // result: (LEAQ2 x (LEAQ4 x x)) + for { + if v.AuxInt != 11 { break } - if p != x0.Args[0] { + x := v.Args[0] + v.reset(OpAMD64LEAQ2) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULQconst [13] x) + // cond: + // result: (LEAQ4 x (LEAQ2 x x)) + for { + if v.AuxInt != 13 { break } - if idx != x0.Args[1] { + x := v.Args[0] + v.reset(OpAMD64LEAQ4) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULQconst [21] x) + // cond: + // result: (LEAQ4 x (LEAQ4 x x)) + for { + if v.AuxInt != 21 { break } - if mem != x0.Args[2] { + x := v.Args[0] + v.reset(OpAMD64LEAQ4) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULQconst [25] x) + // cond: + // result: (LEAQ8 x (LEAQ2 x x)) + for { + if v.AuxInt != 25 { break } - if !(i1 
== i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + x := v.Args[0] + v.reset(OpAMD64LEAQ8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULQconst [37] x) + // cond: + // result: (LEAQ4 x (LEAQ8 x x)) + for { + if v.AuxInt != 37 { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) + x := v.Args[0] + v.reset(OpAMD64LEAQ4) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) + v0.AddArg(x) + v0.AddArg(x) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) return true } - // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQloadidx1 [i0] {s} p idx mem)) + // match: (MULQconst [41] x) + // cond: + // result: (LEAQ8 x (LEAQ4 x x)) for { - r1 := v.Args[0] - if r1.Op != OpAMD64BSWAPL { + if v.AuxInt != 41 { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { + x := v.Args[0] + v.reset(OpAMD64LEAQ8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULQconst [73] x) + // cond: + // result: (LEAQ8 x (LEAQ8 x x)) + for { + if v.AuxInt != 73 { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { + x := v.Args[0] + v.reset(OpAMD64LEAQ8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULQconst [c] x) + // cond: isPowerOfTwo(c) + // result: (SHLQconst [log2(c)] x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c)) { break } - if sh.AuxInt != 32 { + v.reset(OpAMD64SHLQconst) + v.AuxInt = log2(c) + v.AddArg(x) + return true + } + // match: (MULQconst [c] x) + // cond: isPowerOfTwo(c+1) && c >= 15 + // result: (SUBQ (SHLQconst [log2(c+1)] x) x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c+1) && c >= 15) { break } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { + v.reset(OpAMD64SUBQ) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) + v0.AuxInt = log2(c + 1) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULQconst [c] x) + // cond: isPowerOfTwo(c-1) && c >= 17 + // result: (LEAQ1 (SHLQconst [log2(c-1)] x) x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c-1) && c >= 17) { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { + v.reset(OpAMD64LEAQ1) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULQconst [c] x) + // cond: isPowerOfTwo(c-2) && c >= 34 + // result: (LEAQ2 (SHLQconst [log2(c-2)] x) x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c-2) && c >= 34) { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(OpAMD64LEAQ2) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, 
v.Type) + v0.AuxInt = log2(c - 2) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULQconst [c] x) + // cond: isPowerOfTwo(c-4) && c >= 68 + // result: (LEAQ4 (SHLQconst [log2(c-4)] x) x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c-4) && c >= 68) { break } - if idx != x0.Args[0] { + v.reset(OpAMD64LEAQ4) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) + v0.AuxInt = log2(c - 4) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULQconst [c] x) + // cond: isPowerOfTwo(c-8) && c >= 136 + // result: (LEAQ8 (SHLQconst [log2(c-8)] x) x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c-8) && c >= 136) { break } - if p != x0.Args[1] { + v.reset(OpAMD64LEAQ8) + v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) + v0.AuxInt = log2(c - 8) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULQconst [c] x) + // cond: c%3 == 0 && isPowerOfTwo(c/3) + // result: (SHLQconst [log2(c/3)] (LEAQ2 x x)) + for { + c := v.AuxInt + x := v.Args[0] + if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } - if mem != x0.Args[2] { + v.reset(OpAMD64SHLQconst) + v.AuxInt = log2(c / 3) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULQconst [c] x) + // cond: c%5 == 0 && isPowerOfTwo(c/5) + // result: (SHLQconst [log2(c/5)] (LEAQ4 x x)) + for { + c := v.AuxInt + x := v.Args[0] + if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + v.reset(OpAMD64SHLQconst) + v.AuxInt = log2(c / 5) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULQconst [c] x) + // cond: c%9 == 0 && isPowerOfTwo(c/9) + // result: (SHLQconst [log2(c/9)] (LEAQ8 x x)) + for { + c := v.AuxInt + x := v.Args[0] + if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) + v.reset(OpAMD64SHLQconst) + v.AuxInt = log2(c / 9) + v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type) + v0.AddArg(x) + v0.AddArg(x) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) return true } - // match: (ORQ r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQloadidx1 [i0] {s} p idx mem)) + // match: (MULQconst [c] (MOVQconst [d])) + // cond: + // result: (MOVQconst [c*d]) for { - r1 := v.Args[0] - if r1.Op != OpAMD64BSWAPL { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVQconst { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { + d := v_0.AuxInt + v.reset(OpAMD64MOVQconst) + v.AuxInt = c * d + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool { + // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem)) + // cond: canMergeLoad(v, l, x) && clobber(l) + // result: (MULSDmem x [off] {sym} ptr mem) + for { + x := v.Args[0] + l := v.Args[1] + if l.Op != 
OpAMD64MOVSDload { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpAMD64SHLQconst { + off := l.AuxInt + sym := l.Aux + ptr := l.Args[0] + mem := l.Args[1] + if !(canMergeLoad(v, l, x) && clobber(l)) { break } - if sh.AuxInt != 32 { + v.reset(OpAMD64MULSDmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MULSD l:(MOVSDload [off] {sym} ptr mem) x) + // cond: canMergeLoad(v, l, x) && clobber(l) + // result: (MULSDmem x [off] {sym} ptr mem) + for { + l := v.Args[0] + if l.Op != OpAMD64MOVSDload { break } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { + off := l.AuxInt + sym := l.Aux + ptr := l.Args[0] + mem := l.Args[1] + x := v.Args[1] + if !(canMergeLoad(v, l, x) && clobber(l)) { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { + v.reset(OpAMD64MULSDmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool { + // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) + // cond: canMergeLoad(v, l, x) && clobber(l) + // result: (MULSSmem x [off] {sym} ptr mem) + for { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpAMD64MOVSSload { break } - i0 := x0.AuxInt - if x0.Aux != s { + off := l.AuxInt + sym := l.Aux + ptr := l.Args[0] + mem := l.Args[1] + if !(canMergeLoad(v, l, x) && clobber(l)) { break } - if idx != x0.Args[0] { + v.reset(OpAMD64MULSSmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MULSS l:(MOVSSload [off] {sym} ptr mem) x) + // cond: canMergeLoad(v, l, x) && clobber(l) + // result: (MULSSmem x [off] {sym} ptr mem) + for { + l := v.Args[0] + if l.Op != OpAMD64MOVSSload { break } - if p != x0.Args[1] { + off := l.AuxInt + sym := l.Aux + ptr := l.Args[0] + mem := l.Args[1] + x := v.Args[1] + if !(canMergeLoad(v, l, x) && clobber(l)) { break } - if mem != x0.Args[2] { + v.reset(OpAMD64MULSSmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool { + // match: (NEGL (MOVLconst [c])) + // cond: + // result: (MOVLconst [int64(int32(-c))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVLconst { break } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + c := v_0.AuxInt + v.reset(OpAMD64MOVLconst) + v.AuxInt = int64(int32(-c)) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool { + // match: (NEGQ (MOVQconst [c])) + // cond: + // result: (MOVQconst [-c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVQconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + c := v_0.AuxInt + v.reset(OpAMD64MOVQconst) + v.AuxInt = -c return true } - // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && 
clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQloadidx1 [i0] {s} p idx mem)) + // match: (NEGQ (ADDQconst [c] (NEGQ x))) + // cond: c != -(1<<31) + // result: (ADDQconst [-c] x) for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { break } - if sh.AuxInt != 32 { + c := v_0.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAMD64NEGQ { break } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { + x := v_0_0.Args[0] + if !(c != -(1 << 31)) { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { + v.reset(OpAMD64ADDQconst) + v.AuxInt = -c + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool { + // match: (NOTL (MOVLconst [c])) + // cond: + // result: (MOVLconst [^c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVLconst { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - r1 := v.Args[1] - if r1.Op != OpAMD64BSWAPL { + c := v_0.AuxInt + v.reset(OpAMD64MOVLconst) + v.AuxInt = ^c + return true + } + return false +} +func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool { + // match: (NOTQ (MOVQconst [c])) + // cond: + // result: (MOVQconst [^c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVQconst { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { + c := v_0.AuxInt + v.reset(OpAMD64MOVQconst) + v.AuxInt = ^c + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORL(v *Value) bool { + b := v.Block + _ = b + types := &b.Func.Config.Types + _ = types + // match: (ORL x (MOVLconst [c])) + // cond: + // result: (ORLconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVLconst { break } - i1 := x1.AuxInt - if x1.Aux != s { + c := v_1.AuxInt + v.reset(OpAMD64ORLconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ORL (MOVLconst [c]) x) + // cond: + // result: (ORLconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVLconst { break } - if p != x1.Args[0] { + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpAMD64ORLconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: ( ORL (SHLLconst x [c]) (SHRLconst x [32-c])) + // cond: + // result: (ROLLconst x [ c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64SHLLconst { break } - if idx != x1.Args[1] { + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHRLconst { break } - if mem != x1.Args[2] { + if v_1.AuxInt != 32-c { break } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + if x != v_1.Args[0] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v.reset(OpAMD64ROLLconst) + v.AuxInt = c + v.AddArg(x) return true } - // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} p idx mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQloadidx1 [i0] {s} p idx mem)) + // match: ( ORL (SHRLconst x [c]) 
(SHLLconst x [32-c])) + // cond: + // result: (ROLLconst x [32-c]) for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { + v_0 := v.Args[0] + if v_0.Op != OpAMD64SHRLconst { break } - if sh.AuxInt != 32 { + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLLconst { break } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { + if v_1.AuxInt != 32-c { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { + if x != v_1.Args[0] { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - r1 := v.Args[1] - if r1.Op != OpAMD64BSWAPL { + v.reset(OpAMD64ROLLconst) + v.AuxInt = 32 - c + v.AddArg(x) + return true + } + // match: ( ORL (SHLLconst x [c]) (SHRWconst x [16-c])) + // cond: c < 16 && t.Size() == 2 + // result: (ROLWconst x [ c]) + for { + t := v.Type + v_0 := v.Args[0] + if v_0.Op != OpAMD64SHLLconst { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHRWconst { break } - i1 := x1.AuxInt - if x1.Aux != s { + if v_1.AuxInt != 16-c { break } - if p != x1.Args[0] { + if x != v_1.Args[0] { break } - if idx != x1.Args[1] { + if !(c < 16 && t.Size() == 2) { break } - if mem != x1.Args[2] { + v.reset(OpAMD64ROLWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: ( ORL (SHRWconst x [c]) (SHLLconst x [16-c])) + // cond: c > 0 && t.Size() == 2 + // result: (ROLWconst x [16-c]) + for { + t := v.Type + v_0 := v.Args[0] + if v_0.Op != OpAMD64SHRWconst { break } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLLconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + if v_1.AuxInt != 16-c { + break + } + if x != v_1.Args[0] { + break + } + if !(c > 0 && t.Size() == 2) { + break + } + v.reset(OpAMD64ROLWconst) + v.AuxInt = 16 - c + v.AddArg(x) return true } - // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} p idx mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQloadidx1 [i0] {s} p idx mem)) + // match: ( ORL (SHLLconst x [c]) (SHRBconst x [ 8-c])) + // cond: c < 8 && t.Size() == 1 + // result: (ROLBconst x [ c]) for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { + t := v.Type + v_0 := v.Args[0] + if v_0.Op != OpAMD64SHLLconst { break } - if sh.AuxInt != 32 { + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHRBconst { break } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { + if v_1.AuxInt != 8-c { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { + if x != v_1.Args[0] { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - r1 := v.Args[1] - if r1.Op != OpAMD64BSWAPL { + if !(c < 8 && t.Size() == 1) { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { + v.reset(OpAMD64ROLBconst) 
+ v.AuxInt = c + v.AddArg(x) + return true + } + // match: ( ORL (SHRBconst x [c]) (SHLLconst x [ 8-c])) + // cond: c > 0 && t.Size() == 1 + // result: (ROLBconst x [ 8-c]) + for { + t := v.Type + v_0 := v.Args[0] + if v_0.Op != OpAMD64SHRBconst { break } - i1 := x1.AuxInt - if x1.Aux != s { + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLLconst { break } - if idx != x1.Args[0] { + if v_1.AuxInt != 8-c { break } - if p != x1.Args[1] { + if x != v_1.Args[0] { break } - if mem != x1.Args[2] { + if !(c > 0 && t.Size() == 1) { break } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + v.reset(OpAMD64ROLBconst) + v.AuxInt = 8 - c + v.AddArg(x) + return true + } + // match: (ORL x x) + // cond: + // result: x + for { + x := v.Args[0] + if x != v.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v.Type = x.Type + v.AddArg(x) return true } - // match: (ORQ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLloadidx1 [i0] {s} idx p mem))) r1:(BSWAPL x1:(MOVLloadidx1 [i1] {s} idx p mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (BSWAPQ (MOVQloadidx1 [i0] {s} p idx mem)) + // match: (ORL x:(SHLLconst _) y) + // cond: y.Op != OpAMD64SHLLconst + // result: (ORL y x) for { - sh := v.Args[0] - if sh.Op != OpAMD64SHLQconst { + x := v.Args[0] + if x.Op != OpAMD64SHLLconst { break } - if sh.AuxInt != 32 { + y := v.Args[1] + if !(y.Op != OpAMD64SHLLconst) { break } - r0 := sh.Args[0] - if r0.Op != OpAMD64BSWAPL { + v.reset(OpAMD64ORL) + v.AddArg(y) + v.AddArg(x) + return true + } + // match: (ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i+1] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) + // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p mem) + for { + x0 := v.Args[0] + if x0.Op != OpAMD64MOVBload { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVLloadidx1 { + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := v.Args[1] + if s0.Op != OpAMD64SHLLconst { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - r1 := v.Args[1] - if r1.Op != OpAMD64BSWAPL { + if s0.AuxInt != 8 { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVLloadidx1 { + x1 := s0.Args[0] + if x1.Op != OpAMD64MOVBload { break } - i1 := x1.AuxInt - if x1.Aux != s { + if x1.AuxInt != i+1 { break } - if idx != x1.Args[0] { + if x1.Aux != s { break } - if p != x1.Args[1] { + if p != x1.Args[0] { break } - if mem != x1.Args[2] { + if mem != x1.Args[1] { break } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, 
OpAMD64BSWAPQ, v.Type) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, types.UInt64) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v0.AuxInt = i + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) return true } - // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) + // match: (ORL o0:(ORL x0:(MOVWload [i] {s} p mem) s0:(SHLLconst [16] x1:(MOVBload [i+2] {s} p mem))) s1:(SHLLconst [24] x2:(MOVBload [i+3] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p mem) for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { + o0 := v.Args[0] + if o0.Op != OpAMD64ORL { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + x0 := o0.Args[0] + if x0.Op != OpAMD64MOVWload { break } - i0 := x0.AuxInt + i := x0.AuxInt s := x0.Aux p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { - break - } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { - break - } - i1 := x1.AuxInt - if x1.Aux != s { + mem := x0.Args[1] + s0 := o0.Args[1] + if s0.Op != OpAMD64SHLLconst { break } - if p != x1.Args[0] { + if s0.AuxInt != 16 { break } - if idx != x1.Args[1] { + x1 := s0.Args[0] + if x1.Op != OpAMD64MOVBload { break } - if mem != x1.Args[2] { + if x1.AuxInt != i+2 { break } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if x1.Aux != s { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { + if p != x1.Args[0] { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + if mem 
!= x1.Args[1] { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { + s1 := v.Args[1] + if s1.Op != OpAMD64SHLLconst { break } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { + if s1.AuxInt != 24 { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { + x2 := s1.Args[0] + if x2.Op != OpAMD64MOVBload { break } - i1 := x1.AuxInt - if x1.Aux != s { + if x2.AuxInt != i+3 { break } - if p != x1.Args[0] { + if x2.Aux != s { break } - if idx != x1.Args[1] { + if p != x2.Args[0] { break } - if mem != x1.Args[2] { + if mem != x2.Args[1] { break } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = i + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) return true } - // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) + // match: (ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) + // result: @mergePoint(b,x0,x1) (MOVWloadidx1 [i] {s} p idx mem) for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] + x0 := v.Args[0] if x0.Op != OpAMD64MOVBloadidx1 { break } - i0 := x0.AuxInt + i := x0.AuxInt s := x0.Aux p := x0.Args[0] idx := x0.Args[1] mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { + s0 := v.Args[1] + if s0.Op != OpAMD64SHLLconst { break } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { + if s0.AuxInt != 8 { break } - j1 := s1.AuxInt - x1 := s1.Args[0] + x1 := s0.Args[0] if x1.Op != OpAMD64MOVBloadidx1 { break } - i1 := x1.AuxInt + if x1.AuxInt != i+1 { + break + } if x1.Aux != s { break } - if idx != x1.Args[0] { + if p != x1.Args[0] { break } - if p != x1.Args[1] { + if idx != x1.Args[1] { break } if mem != x1.Args[2] { break } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && 
clobber(s0) && clobber(s1) && clobber(or)) { + if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = i + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) return true } - // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) + // match: (ORL o0:(ORL x0:(MOVWloadidx1 [i] {s} p idx mem) s0:(SHLLconst [16] x1:(MOVBloadidx1 [i+2] {s} p idx mem))) s1:(SHLLconst [24] x2:(MOVBloadidx1 [i+3] {s} p idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (MOVLloadidx1 [i] {s} p idx mem) for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { + o0 := v.Args[0] + if o0.Op != OpAMD64ORL { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + x0 := o0.Args[0] + if x0.Op != OpAMD64MOVWloadidx1 { break } - i0 := x0.AuxInt + i := x0.AuxInt s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] + p := x0.Args[0] + idx := x0.Args[1] mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { + s0 := o0.Args[1] + if s0.Op != OpAMD64SHLLconst { break } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { + if s0.AuxInt != 16 { break } - j1 := s1.AuxInt - x1 := s1.Args[0] + x1 := s0.Args[0] if x1.Op != OpAMD64MOVBloadidx1 { break } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { + if x1.AuxInt != i+2 { break } - if p != x1.Args[1] { + if x1.Aux != s { break } - if mem != x1.Args[2] { + if p != x1.Args[0] { break } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if idx != x1.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) - // cond: i1 
== i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { + if mem != x1.Args[2] { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + s1 := v.Args[1] + if s1.Op != OpAMD64SHLLconst { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { + if s1.AuxInt != 24 { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { + x2 := s1.Args[0] + if x2.Op != OpAMD64MOVBloadidx1 { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { + if x2.AuxInt != i+3 { break } - i1 := x1.AuxInt - if x1.Aux != s { + if x2.Aux != s { break } - if p != x1.Args[0] { + if p != x2.Args[0] { break } - if idx != x1.Args[1] { + if idx != x2.Args[1] { break } - if mem != x1.Args[2] { + if mem != x2.Args[2] { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = i + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) return true } - // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) + // match: (ORL x0:(MOVBload [i] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i-1] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) + // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWload [i-1] {s} p mem)) for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { + x0 := v.Args[0] + if x0.Op != OpAMD64MOVBload { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := v.Args[1] + if s0.Op != OpAMD64SHLLconst { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { + if 
s0.AuxInt != 8 { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { + x1 := s0.Args[0] + if x1.Op != OpAMD64MOVBload { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { + if x1.AuxInt != i-1 { break } - i1 := x1.AuxInt if x1.Aux != s { break } if p != x1.Args[0] { break } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { + if mem != x1.Args[1] { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + v0 := b.NewValue0(v.Pos, OpAMD64ROLWconst, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) + v0.AuxInt = 8 + v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16) + v1.AuxInt = i - 1 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(mem) v0.AddArg(v1) - v0.AddArg(y) return true } - // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) + // match: (ORL x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLLconst [8] x1:(MOVBloadidx1 [i-1] {s} p idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) + // result: @mergePoint(b,x0,x1) (ROLWconst [8] (MOVWloadidx1 [i-1] {s} p idx mem)) for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] + x0 := v.Args[0] if x0.Op != OpAMD64MOVBloadidx1 { break } - i0 := x0.AuxInt + i := x0.AuxInt s := x0.Aux p := x0.Args[0] idx := x0.Args[1] mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { + s0 := v.Args[1] + if s0.Op != OpAMD64SHLLconst { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { + if s0.AuxInt != 8 { break } - j1 := s1.AuxInt - x1 := s1.Args[0] + x1 := s0.Args[0] if x1.Op != OpAMD64MOVBloadidx1 { break } - i1 := x1.AuxInt + if x1.AuxInt != i-1 { + break + } if x1.Aux != s { break } - if idx != x1.Args[0] { + if p != x1.Args[0] { break } - if p != x1.Args[1] { + if idx != x1.Args[1] { break } if mem != x1.Args[2] { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + v0 := b.NewValue0(v.Pos, 
OpAMD64ROLWconst, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) + v0.AuxInt = 8 + v1 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, v.Type) + v1.AuxInt = i - 1 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(idx) + v1.AddArg(mem) v0.AddArg(v1) - v0.AddArg(y) return true } - // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem)) or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) + // match: (ORL o1:(ORL o0:(ROLWconst [8] x01:(MOVWload [i1] {s} p mem)) s1:(SHLLconst [16] x2:(MOVBload [i1-1] {s} p mem))) s2:(SHLLconst [24] x3:(MOVBload [i1-2] {s} p mem))) + // cond: x01.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x01,x2,x3) != nil && clobber(x01) && clobber(x2) && clobber(x3) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) + // result: @mergePoint(b,x01,x2,x3) (BSWAPL (MOVLload [i1-2] {s} p mem)) for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + o1 := v.Args[0] + if o1.Op != OpAMD64ORL { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { + o0 := o1.Args[0] + if o0.Op != OpAMD64ROLWconst { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { + if o0.AuxInt != 8 { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { + x01 := o0.Args[0] + if x01.Op != OpAMD64MOVWload { break } - i1 := x1.AuxInt - if x1.Aux != s { + i1 := x01.AuxInt + s := x01.Aux + p := x01.Args[0] + mem := x01.Args[1] + s1 := o1.Args[1] + if s1.Op != OpAMD64SHLLconst { break } - if idx != x1.Args[0] { + if s1.AuxInt != 16 { break } - if p != x1.Args[1] { + x2 := s1.Args[0] + if x2.Op != OpAMD64MOVBload { break } - if mem != x1.Args[2] { + if x2.AuxInt != i1-1 { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if x2.Aux != s { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && 
s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { + if p != x2.Args[0] { break } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { + if mem != x2.Args[1] { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { + s2 := v.Args[1] + if s2.Op != OpAMD64SHLLconst { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { + if s2.AuxInt != 24 { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + x3 := s2.Args[0] + if x3.Op != OpAMD64MOVBload { break } - i0 := x0.AuxInt - if x0.Aux != s { + if x3.AuxInt != i1-2 { break } - if p != x0.Args[0] { + if x3.Aux != s { break } - if idx != x0.Args[1] { + if p != x3.Args[0] { break } - if mem != x0.Args[2] { + if mem != x3.Args[1] { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if !(x01.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x01, x2, x3) != nil && clobber(x01) && clobber(x2) && clobber(x3) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + b = mergePoint(b, x01, x2, x3) + v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32) + v1.AuxInt = i1 - 2 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(mem) v0.AddArg(v1) - v0.AddArg(y) return true } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) + // match: (ORL o1:(ORL o0:(ROLWconst [8] x01:(MOVWloadidx1 [i1] {s} p idx mem)) s1:(SHLLconst [16] x2:(MOVBloadidx1 [i1-1] {s} p idx mem))) s2:(SHLLconst [24] x3:(MOVBloadidx1 [i1-2] {s} p idx mem))) + // cond: x01.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b,x01,x2,x3) != nil && clobber(x01) && clobber(x2) && clobber(x3) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1) + // result: @mergePoint(b,x01,x2,x3) (BSWAPL (MOVLloadidx1 [i1-2] {s} p idx mem)) for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { + o1 := v.Args[0] + if o1.Op != OpAMD64ORL { break } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { + o0 := o1.Args[0] + if o0.Op != OpAMD64ROLWconst { break } - j1 := s1.AuxInt - x1 
:= s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { + if o0.AuxInt != 8 { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { + x01 := o0.Args[0] + if x01.Op != OpAMD64MOVWloadidx1 { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + i1 := x01.AuxInt + s := x01.Aux + p := x01.Args[0] + idx := x01.Args[1] + mem := x01.Args[2] + s1 := o1.Args[1] + if s1.Op != OpAMD64SHLLconst { break } - i0 := x0.AuxInt - if x0.Aux != s { + if s1.AuxInt != 16 { break } - if p != x0.Args[0] { + x2 := s1.Args[0] + if x2.Op != OpAMD64MOVBloadidx1 { break } - if idx != x0.Args[1] { + if x2.AuxInt != i1-1 { break } - if mem != x0.Args[2] { + if x2.Aux != s { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if p != x2.Args[0] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { + if idx != x2.Args[1] { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { + if mem != x2.Args[2] { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { + s2 := v.Args[1] + if s2.Op != OpAMD64SHLLconst { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { + if s2.AuxInt != 24 { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + x3 := s2.Args[0] + if x3.Op != OpAMD64MOVBloadidx1 { break } - i0 := x0.AuxInt - if x0.Aux != s { + if x3.AuxInt != i1-2 { break } - if p != x0.Args[0] { + if x3.Aux != s { break } - if idx != x0.Args[1] { + if p != x3.Args[0] { break } - if mem != x0.Args[2] { + if idx != x3.Args[1] { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if mem != x3.Args[2] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + if !(x01.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && mergePoint(b, x01, x2, x3) != nil && clobber(x01) && clobber(x2) && clobber(x3) && clobber(s1) && clobber(s2) && clobber(o0) && clobber(o1)) { + break + } + b = mergePoint(b, x01, x2, x3) + v0 := 
b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) + v1 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, v.Type) + v1.AuxInt = i1 - 2 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(idx) + v1.AddArg(mem) v0.AddArg(v1) - v0.AddArg(y) return true } - // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) + // match: (ORL x l:(MOVLload [off] {sym} ptr mem)) + // cond: canMergeLoad(v, l, x) && clobber(l) + // result: (ORLmem x [off] {sym} ptr mem) for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpAMD64MOVLload { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { + off := l.AuxInt + sym := l.Aux + ptr := l.Args[0] + mem := l.Args[1] + if !(canMergeLoad(v, l, x) && clobber(l)) { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { + v.reset(OpAMD64ORLmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (ORL l:(MOVLload [off] {sym} ptr mem) x) + // cond: canMergeLoad(v, l, x) && clobber(l) + // result: (ORLmem x [off] {sym} ptr mem) + for { + l := v.Args[0] + if l.Op != OpAMD64MOVLload { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + off := l.AuxInt + sym := l.Aux + ptr := l.Args[0] + mem := l.Args[1] + x := v.Args[1] + if !(canMergeLoad(v, l, x) && clobber(l)) { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(OpAMD64ORLmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool { + // match: (ORLconst [c] x) + // cond: int32(c)==0 + // result: x + for { + c := v.AuxInt + x := v.Args[0] + if !(int32(c) == 0) { break } - if p != x0.Args[0] { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (ORLconst [c] _) + // cond: int32(c)==-1 + // result: (MOVLconst [-1]) + for { + c := v.AuxInt + if !(int32(c) == -1) { break } - if idx != x0.Args[1] { + v.reset(OpAMD64MOVLconst) + v.AuxInt = -1 + return true + } + // match: (ORLconst [c] (MOVLconst [d])) + // cond: + // result: (MOVLconst [c|d]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVLconst { break } - if mem != x0.Args[2] { + d := v_0.AuxInt + v.reset(OpAMD64MOVLconst) + v.AuxInt = c | d + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { + b := v.Block + _ = b + types := &b.Func.Config.Types + _ = types + // match: (ORQ x (MOVQconst [c])) + // cond: is32Bit(c) + // result: (ORQconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != 
OpAMD64MOVQconst { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + c := v_1.AuxInt + if !(is32Bit(c)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpAMD64ORQconst) + v.AuxInt = c + v.AddArg(x) return true } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) + // match: (ORQ (MOVQconst [c]) x) + // cond: is32Bit(c) + // result: (ORQconst [c] x) for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVQconst { break } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { + c := v_0.AuxInt + x := v.Args[1] + if !(is32Bit(c)) { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { + v.reset(OpAMD64ORQconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: ( ORQ (SHLQconst x [c]) (SHRQconst x [64-c])) + // cond: + // result: (ROLQconst x [ c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64SHLQconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHRQconst { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { + if v_1.AuxInt != 64-c { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + if x != v_1.Args[0] { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(OpAMD64ROLQconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: ( ORQ (SHRQconst x [c]) (SHLQconst x [64-c])) + // cond: + // result: (ROLQconst x [64-c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpAMD64SHRQconst { break } - if idx != x0.Args[0] { + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLQconst { break } - if p != x0.Args[1] { + if v_1.AuxInt != 64-c { break } - if mem != x0.Args[2] { + if x != v_1.Args[0] { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + v.reset(OpAMD64ROLQconst) + v.AuxInt = 64 - c + v.AddArg(x) + return true + } + // match: (ORQ x x) + // cond: + // result: x + for { + x := v.Args[0] + if x != v.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 
:= b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.Type = x.Type + v.AddArg(x) return true } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem)) y) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) + // match: (ORQ x:(SHLQconst _) y) + // cond: y.Op != OpAMD64SHLQconst + // result: (ORQ y x) for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { + x := v.Args[0] + if x.Op != OpAMD64SHLQconst { break } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { + y := v.Args[1] + if !(y.Op != OpAMD64SHLQconst) { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { + v.reset(OpAMD64ORQ) + v.AddArg(y) + v.AddArg(x) + return true + } + // match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ x0:(MOVBload [i] {s} p mem) s0:(SHLQconst [8] x1:(MOVBload [i+1] {s} p mem))) s1:(SHLQconst [16] x2:(MOVBload [i+2] {s} p mem))) s2:(SHLQconst [24] x3:(MOVBload [i+3] {s} p mem))) s3:(SHLQconst [32] x4:(MOVBload [i+4] {s} p mem))) s4:(SHLQconst [40] x5:(MOVBload [i+5] {s} p mem))) s5:(SHLQconst [48] x6:(MOVBload [i+6] {s} p mem))) s6:(SHLQconst [56] x7:(MOVBload [i+7] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQload [i] {s} p mem) + for { + o0 := v.Args[0] + if o0.Op != OpAMD64ORQ { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { + o1 := o0.Args[0] + if o1.Op != OpAMD64ORQ { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + o2 := o1.Args[0] + if o2.Op != OpAMD64ORQ { break } - i0 := x0.AuxInt - if x0.Aux != s { + o3 := o2.Args[0] + if o3.Op != OpAMD64ORQ { break } - if idx != x0.Args[0] { + o4 := o3.Args[0] + if o4.Op != OpAMD64ORQ { break } - if p != x0.Args[1] { + o5 := o4.Args[0] + if o5.Op != OpAMD64ORQ { break } - if mem != x0.Args[2] { + x0 := o5.Args[0] + if x0.Op != OpAMD64MOVBload { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := o5.Args[1] + if s0.Op != OpAMD64SHLQconst { break } - b = mergePoint(b, x0, x1) - v0 
:= b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} p idx mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { + if s0.AuxInt != 8 { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { + x1 := s0.Args[0] + if x1.Op != OpAMD64MOVBload { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { + if x1.AuxInt != i+1 { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { + if x1.Aux != s { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + if p != x1.Args[0] { break } - i0 := x0.AuxInt - if x0.Aux != s { + if mem != x1.Args[1] { break } - if idx != x0.Args[0] { + s1 := o4.Args[1] + if s1.Op != OpAMD64SHLQconst { break } - if p != x0.Args[1] { + if s1.AuxInt != 16 { break } - if mem != x0.Args[2] { + x2 := s1.Args[0] + if x2.Op != OpAMD64MOVBload { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if x2.AuxInt != i+2 { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] x1:(MOVBloadidx1 [i1] {s} idx p mem))) s0:(SHLQconst [j0] x0:(MOVBloadidx1 [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (ROLWconst [8] (MOVWloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { + if x2.Aux != s { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { + if p != x2.Args[0] { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpAMD64MOVBloadidx1 { + if mem != x2.Args[1] { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { + s2 := o3.Args[1] + if s2.Op != OpAMD64SHLQconst { break } - j0 := s0.AuxInt - 
x0 := s0.Args[0] - if x0.Op != OpAMD64MOVBloadidx1 { + if s2.AuxInt != 24 { break } - i0 := x0.AuxInt - if x0.Aux != s { + x3 := s2.Args[0] + if x3.Op != OpAMD64MOVBload { break } - if idx != x0.Args[0] { + if x3.AuxInt != i+3 { break } - if p != x0.Args[1] { + if x3.Aux != s { break } - if mem != x0.Args[2] { + if p != x3.Args[0] { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if mem != x3.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64ROLWconst, types.UInt16) - v2.AuxInt = 8 - v3 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y)) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { + s3 := o2.Args[1] + if s3.Op != OpAMD64SHLQconst { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { + if s3.AuxInt != 32 { break } - if r0.AuxInt != 8 { + x4 := s3.Args[0] + if x4.Op != OpAMD64MOVBload { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + if x4.AuxInt != i+4 { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { + if x4.Aux != s { break } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { + if p != x4.Args[0] { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { + if mem != x4.Args[1] { break } - if r1.AuxInt != 8 { + s4 := o1.Args[1] + if s4.Op != OpAMD64SHLQconst { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + if s4.AuxInt != 40 { break } - i1 := x1.AuxInt - if x1.Aux != s { + x5 := s4.Args[0] + if x5.Op != OpAMD64MOVBload { break } - if p != x1.Args[0] { + if x5.AuxInt != i+5 { break } - if idx != x1.Args[1] { + if x5.Aux != s { break } - if mem != x1.Args[2] { + if p != x5.Args[0] { break } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + if mem != x5.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - 
v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y)) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { + s5 := o0.Args[1] + if s5.Op != OpAMD64SHLQconst { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { + if s5.AuxInt != 48 { break } - if r0.AuxInt != 8 { + x6 := s5.Args[0] + if x6.Op != OpAMD64MOVBload { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + if x6.AuxInt != i+6 { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { + if x6.Aux != s { break } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { + if p != x6.Args[0] { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { + if mem != x6.Args[1] { break } - if r1.AuxInt != 8 { + s6 := v.Args[1] + if s6.Op != OpAMD64SHLQconst { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + if s6.AuxInt != 56 { break } - i1 := x1.AuxInt - if x1.Aux != s { + x7 := s6.Args[0] + if x7.Op != OpAMD64MOVBload { break } - if p != x1.Args[0] { + if x7.AuxInt != i+7 { break } - if idx != x1.Args[1] { + if x7.Aux != s { break } - if mem != x1.Args[2] { + if p != x7.Args[0] { break } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + if mem != x7.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = i + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) return true } - // match: (ORQ 
s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) + // match: (ORQ o0:(ORQ o1:(ORQ o2:(ORQ o3:(ORQ o4:(ORQ o5:(ORQ x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLQconst [8] x1:(MOVBloadidx1 [i+1] {s} p idx mem))) s1:(SHLQconst [16] x2:(MOVBloadidx1 [i+2] {s} p idx mem))) s2:(SHLQconst [24] x3:(MOVBloadidx1 [i+3] {s} p idx mem))) s3:(SHLQconst [32] x4:(MOVBloadidx1 [i+4] {s} p idx mem))) s4:(SHLQconst [40] x5:(MOVBloadidx1 [i+5] {s} p idx mem))) s5:(SHLQconst [48] x6:(MOVBloadidx1 [i+6] {s} p idx mem))) s6:(SHLQconst [56] x7:(MOVBloadidx1 [i+7] {s} p idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVQloadidx1 [i] {s} p idx mem) for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { + o0 := v.Args[0] + if o0.Op != OpAMD64ORQ { break } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { + o1 := o0.Args[0] + if o1.Op != OpAMD64ORQ { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { + o2 := o1.Args[0] + if o2.Op != OpAMD64ORQ { break } - if r1.AuxInt != 8 { + o3 := o2.Args[0] + if o3.Op != OpAMD64ORQ { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + o4 := o3.Args[0] + if o4.Op != OpAMD64ORQ { break } - i1 := x1.AuxInt - if x1.Aux != s { + o5 := o4.Args[0] + if o5.Op != OpAMD64ORQ { break } - if idx != x1.Args[0] { + x0 := o5.Args[0] + if x0.Op != OpAMD64MOVBloadidx1 { break } - if p != x1.Args[1] { + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + s0 := o5.Args[1] + if s0.Op != OpAMD64SHLQconst { break } - if mem != x1.Args[2] { + if s0.AuxInt != 8 { break } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + x1 := s0.Args[0] + if x1.Op != OpAMD64MOVBloadidx1 { break } - b = mergePoint(b, x0, x1) - v0 := 
b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y)) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { + if x1.AuxInt != i+1 { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { + if x1.Aux != s { break } - if r0.AuxInt != 8 { + if p != x1.Args[0] { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + if idx != x1.Args[1] { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { + if mem != x1.Args[2] { break } - s1 := or.Args[0] + s1 := o4.Args[1] if s1.Op != OpAMD64SHLQconst { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { + if s1.AuxInt != 16 { break } - if r1.AuxInt != 8 { + x2 := s1.Args[0] + if x2.Op != OpAMD64MOVBloadidx1 { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + if x2.AuxInt != i+2 { break } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { + if x2.Aux != s { break } - if mem != x1.Args[2] { + if p != x2.Args[0] { break } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + if idx != x2.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { + if mem != x2.Args[2] { break } - j0 := s0.AuxInt - r0 := 
s0.Args[0] - if r0.Op != OpAMD64ROLWconst { + s2 := o3.Args[1] + if s2.Op != OpAMD64SHLQconst { break } - if r0.AuxInt != 8 { + if s2.AuxInt != 24 { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + x3 := s2.Args[0] + if x3.Op != OpAMD64MOVBloadidx1 { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { + if x3.AuxInt != i+3 { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { + if x3.Aux != s { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { + if p != x3.Args[0] { break } - if r1.AuxInt != 8 { + if idx != x3.Args[1] { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + if mem != x3.Args[2] { break } - i1 := x1.AuxInt - if x1.Aux != s { + s3 := o2.Args[1] + if s3.Op != OpAMD64SHLQconst { break } - if p != x1.Args[0] { + if s3.AuxInt != 32 { break } - if idx != x1.Args[1] { + x4 := s3.Args[0] + if x4.Op != OpAMD64MOVBloadidx1 { break } - if mem != x1.Args[2] { + if x4.AuxInt != i+4 { break } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + if x4.Aux != s { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { + if p != x4.Args[0] { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { + if idx != x4.Args[1] { break } - if r0.AuxInt != 8 { + if mem != x4.Args[2] { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + s4 := o1.Args[1] + if s4.Op != OpAMD64SHLQconst { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { + if s4.AuxInt != 40 { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { + x5 := s4.Args[0] + if x5.Op != OpAMD64MOVBloadidx1 { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { + if x5.AuxInt != i+5 { break } - if r1.AuxInt != 8 { + if x5.Aux != s { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + if p != x5.Args[0] { break } - i1 := x1.AuxInt - if x1.Aux != s { + if idx != x5.Args[1] { break } - if p != x1.Args[0] { + if mem != x5.Args[2] { break } - if idx != x1.Args[1] { + s5 := o0.Args[1] + if s5.Op != OpAMD64SHLQconst { break } 
- if mem != x1.Args[2] { + if s5.AuxInt != 48 { break } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + x6 := s5.Args[0] + if x6.Op != OpAMD64MOVBloadidx1 { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { + if x6.AuxInt != i+6 { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { + if x6.Aux != s { break } - if r0.AuxInt != 8 { + if p != x6.Args[0] { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + if idx != x6.Args[1] { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { + if mem != x6.Args[2] { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { + s6 := v.Args[1] + if s6.Op != OpAMD64SHLQconst { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { + if s6.AuxInt != 56 { break } - if r1.AuxInt != 8 { + x7 := s6.Args[0] + if x7.Op != OpAMD64MOVBloadidx1 { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + if x7.AuxInt != i+7 { break } - i1 := x1.AuxInt - if x1.Aux != s { + if x7.Aux != s { break } - if idx != x1.Args[0] { + if p != x7.Args[0] { break } - if p != x1.Args[1] { + if idx != x7.Args[1] { break } - if mem != x1.Args[2] { + if mem != x7.Args[2] { break } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) 
&& clobber(o3) && clobber(o4) && clobber(o5)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = i + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) return true } - // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem))) or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) + // match: (ORQ o5:(ORQ o4:(ORQ o3:(ORQ o2:(ORQ o1:(ORQ o0:(ORQ x0:(MOVBload [i] {s} p mem) s0:(SHLQconst [8] x1:(MOVBload [i-1] {s} p mem))) s1:(SHLQconst [16] x2:(MOVBload [i-2] {s} p mem))) s2:(SHLQconst [24] x3:(MOVBload [i-3] {s} p mem))) s3:(SHLQconst [32] x4:(MOVBload [i-4] {s} p mem))) s4:(SHLQconst [40] x5:(MOVBload [i-5] {s} p mem))) s5:(SHLQconst [48] x6:(MOVBload [i-6] {s} p mem))) s6:(SHLQconst [56] x7:(MOVBload [i-7] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (BSWAPQ (MOVQload [i-7] {s} p mem)) for { - s0 := v.Args[0] - if s0.Op != OpAMD64SHLQconst { + o5 := v.Args[0] + if o5.Op != OpAMD64ORQ { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { + o4 := o5.Args[0] + if o4.Op != OpAMD64ORQ { break } - if r0.AuxInt != 8 { + o3 := o4.Args[0] + if o3.Op != OpAMD64ORQ { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + o2 := o3.Args[0] + if o2.Op != OpAMD64ORQ { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpAMD64ORQ { + o1 := o2.Args[0] + if o1.Op != OpAMD64ORQ { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { + o0 := o1.Args[0] + if o0.Op != OpAMD64ORQ { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { + x0 := o0.Args[0] + if x0.Op != OpAMD64MOVBload { break } - if r1.AuxInt != 8 { + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := o0.Args[1] + if s0.Op != OpAMD64SHLQconst { break } - x1 := r1.Args[0] - if 
x1.Op != OpAMD64MOVWloadidx1 { + if s0.AuxInt != 8 { break } - i1 := x1.AuxInt - if x1.Aux != s { + x1 := s0.Args[0] + if x1.Op != OpAMD64MOVBload { break } - if idx != x1.Args[0] { + if x1.AuxInt != i-1 { break } - if p != x1.Args[1] { + if x1.Aux != s { break } - if mem != x1.Args[2] { + if p != x1.Args[0] { break } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + if mem != x1.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { + s1 := o1.Args[1] + if s1.Op != OpAMD64SHLQconst { break } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { + if s1.AuxInt != 16 { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { + x2 := s1.Args[0] + if x2.Op != OpAMD64MOVBload { break } - if r1.AuxInt != 8 { + if x2.AuxInt != i-2 { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + if x2.Aux != s { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { + if p != x2.Args[0] { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { + if mem != x2.Args[1] { break } - if r0.AuxInt != 8 { + s2 := o2.Args[1] + if s2.Op != OpAMD64SHLQconst { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + if s2.AuxInt != 24 { break } - i0 := x0.AuxInt - if x0.Aux != s { + x3 := s2.Args[0] + if x3.Op != OpAMD64MOVBload { break } - if p != x0.Args[0] { + if x3.AuxInt != i-3 { break } - if idx != x0.Args[1] { + if x3.Aux != s { break } - if mem != x0.Args[2] { + if p != x3.Args[0] { break } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + if mem != x3.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - 
v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { + s3 := o3.Args[1] + if s3.Op != OpAMD64SHLQconst { break } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { + if s3.AuxInt != 32 { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { + x4 := s3.Args[0] + if x4.Op != OpAMD64MOVBload { break } - if r1.AuxInt != 8 { + if x4.AuxInt != i-4 { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + if x4.Aux != s { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { + if p != x4.Args[0] { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { + if mem != x4.Args[1] { break } - if r0.AuxInt != 8 { + s4 := o4.Args[1] + if s4.Op != OpAMD64SHLQconst { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + if s4.AuxInt != 40 { break } - i0 := x0.AuxInt - if x0.Aux != s { + x5 := s4.Args[0] + if x5.Op != OpAMD64MOVBload { break } - if p != x0.Args[0] { + if x5.AuxInt != i-5 { break } - if idx != x0.Args[1] { + if x5.Aux != s { break } - if mem != x0.Args[2] { + if p != x5.Args[0] { break } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + if mem != x5.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { + s5 := o5.Args[1] + if s5.Op != OpAMD64SHLQconst { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { + if s5.AuxInt != 48 { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { + x6 := s5.Args[0] + 
if x6.Op != OpAMD64MOVBload { break } - if r1.AuxInt != 8 { + if x6.AuxInt != i-6 { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + if x6.Aux != s { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { + if p != x6.Args[0] { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { + if mem != x6.Args[1] { break } - if r0.AuxInt != 8 { + s6 := v.Args[1] + if s6.Op != OpAMD64SHLQconst { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + if s6.AuxInt != 56 { break } - i0 := x0.AuxInt - if x0.Aux != s { + x7 := s6.Args[0] + if x7.Op != OpAMD64MOVBload { break } - if p != x0.Args[0] { + if x7.AuxInt != i-7 { break } - if idx != x0.Args[1] { + if x7.Aux != s { break } - if mem != x0.Args[2] { + if p != x7.Args[0] { break } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + if mem != x7.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64) + v1.AuxInt = i - 7 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(mem) v0.AddArg(v1) - v0.AddArg(y) return true } - // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) + // match: (ORQ o5:(ORQ o4:(ORQ o3:(ORQ o2:(ORQ o1:(ORQ o0:(ORQ x0:(MOVBloadidx1 [i] {s} p idx mem) s0:(SHLQconst [8] x1:(MOVBloadidx1 [i-1] {s} p idx mem))) s1:(SHLQconst [16] x2:(MOVBloadidx1 [i-2] {s} p idx mem))) s2:(SHLQconst [24] x3:(MOVBloadidx1 [i-3] {s} p idx mem))) s3:(SHLQconst [32] x4:(MOVBloadidx1 [i-4] {s} p idx mem))) s4:(SHLQconst [40] 
x5:(MOVBloadidx1 [i-5] {s} p idx mem))) s5:(SHLQconst [48] x6:(MOVBloadidx1 [i-6] {s} p idx mem))) s6:(SHLQconst [56] x7:(MOVBloadidx1 [i-7] {s} p idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (BSWAPQ (MOVQloadidx1 [i-7] {s} p idx mem)) for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { + o5 := v.Args[0] + if o5.Op != OpAMD64ORQ { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { + o4 := o5.Args[0] + if o4.Op != OpAMD64ORQ { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { + o3 := o4.Args[0] + if o3.Op != OpAMD64ORQ { break } - if r1.AuxInt != 8 { + o2 := o3.Args[0] + if o2.Op != OpAMD64ORQ { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + o1 := o2.Args[0] + if o1.Op != OpAMD64ORQ { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { + o0 := o1.Args[0] + if o0.Op != OpAMD64ORQ { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { + x0 := o0.Args[0] + if x0.Op != OpAMD64MOVBloadidx1 { break } - if r0.AuxInt != 8 { + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + s0 := o0.Args[1] + if s0.Op != OpAMD64SHLQconst { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + if s0.AuxInt != 8 { break } - i0 := x0.AuxInt - if x0.Aux != s { + x1 := s0.Args[0] + if x1.Op != OpAMD64MOVBloadidx1 { break } - if p != x0.Args[0] { + if x1.AuxInt != i-1 { break } - if idx != x0.Args[1] { + if x1.Aux != s { break } - if mem != x0.Args[2] { + if p != x1.Args[0] { break } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + if idx != x1.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) 
&& clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { + if mem != x1.Args[2] { break } - s1 := or.Args[0] + s1 := o1.Args[1] if s1.Op != OpAMD64SHLQconst { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { - break - } - if r1.AuxInt != 8 { - break - } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { - break - } - if r0.AuxInt != 8 { - break - } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + if s1.AuxInt != 16 { break } - i0 := x0.AuxInt - if x0.Aux != s { + x2 := s1.Args[0] + if x2.Op != OpAMD64MOVBloadidx1 { break } - if idx != x0.Args[0] { + if x2.AuxInt != i-2 { break } - if p != x0.Args[1] { + if x2.Aux != s { break } - if mem != x0.Args[2] { + if p != x2.Args[0] { break } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + if idx != x2.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem))) y) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { + if mem != x2.Args[2] { break } - s1 := or.Args[0] - if s1.Op != OpAMD64SHLQconst { + s2 := o2.Args[1] + if s2.Op != OpAMD64SHLQconst { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { + if s2.AuxInt != 24 { break } - if r1.AuxInt != 8 { + x3 := s2.Args[0] + if x3.Op != OpAMD64MOVBloadidx1 { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + if x3.AuxInt != i-3 { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { + if x3.Aux != s { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { + if p != x3.Args[0] { break } - if r0.AuxInt != 8 { + if idx != x3.Args[1] { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + if mem != x3.Args[2] { break } - i0 := x0.AuxInt - if x0.Aux != s { + s3 := o3.Args[1] + if s3.Op != OpAMD64SHLQconst { break } - if idx != x0.Args[0] { + if s3.AuxInt 
!= 32 { break } - if p != x0.Args[1] { + x4 := s3.Args[0] + if x4.Op != OpAMD64MOVBloadidx1 { break } - if mem != x0.Args[2] { + if x4.AuxInt != i-4 { break } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + if x4.Aux != s { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} p idx mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { + if p != x4.Args[0] { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { + if idx != x4.Args[1] { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { + if mem != x4.Args[2] { break } - if r1.AuxInt != 8 { + s4 := o4.Args[1] + if s4.Op != OpAMD64SHLQconst { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + if s4.AuxInt != 40 { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { + x5 := s4.Args[0] + if x5.Op != OpAMD64MOVBloadidx1 { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { + if x5.AuxInt != i-5 { break } - if r0.AuxInt != 8 { + if x5.Aux != s { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + if p != x5.Args[0] { break } - i0 := x0.AuxInt - if x0.Aux != s { + if idx != x5.Args[1] { break } - if idx != x0.Args[0] { + if mem != x5.Args[2] { break } - if p != x0.Args[1] { + s5 := o5.Args[1] + if s5.Op != OpAMD64SHLQconst { break } - if mem != x0.Args[2] { + if s5.AuxInt != 48 { break } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + x6 := s5.Args[0] + if x6.Op != OpAMD64MOVBloadidx1 { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORQ or:(ORQ y s1:(SHLQconst [j1] 
r1:(ROLWconst [8] x1:(MOVWloadidx1 [i1] {s} idx p mem)))) s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWloadidx1 [i0] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORQ (SHLQconst [j1] (BSWAPL (MOVLloadidx1 [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpAMD64ORQ { + if x6.AuxInt != i-6 { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpAMD64SHLQconst { + if x6.Aux != s { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpAMD64ROLWconst { + if p != x6.Args[0] { break } - if r1.AuxInt != 8 { + if idx != x6.Args[1] { break } - x1 := r1.Args[0] - if x1.Op != OpAMD64MOVWloadidx1 { + if mem != x6.Args[2] { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpAMD64SHLQconst { + s6 := v.Args[1] + if s6.Op != OpAMD64SHLQconst { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpAMD64ROLWconst { + if s6.AuxInt != 56 { break } - if r0.AuxInt != 8 { + x7 := s6.Args[0] + if x7.Op != OpAMD64MOVBloadidx1 { break } - x0 := r0.Args[0] - if x0.Op != OpAMD64MOVWloadidx1 { + if x7.AuxInt != i-7 { break } - i0 := x0.AuxInt - if x0.Aux != s { + if x7.Aux != s { break } - if idx != x0.Args[0] { + if p != x7.Args[0] { break } - if p != x0.Args[1] { + if idx != x7.Args[1] { break } - if mem != x0.Args[2] { + if mem != x7.Args[2] { break } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpAMD64ORQ, v.Type) + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpAMD64BSWAPL, types.UInt32) - v3 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, v.Type) + v1.AuxInt = i - 7 + v1.Aux = s + v1.AddArg(p) + v1.AddArg(idx) + v1.AddArg(mem) v0.AddArg(v1) - v0.AddArg(y) return true } // match: (ORQ x l:(MOVQload [off] {sym} ptr mem)) @@ -82804,37 +14900,6 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { v.AddArg(v0) return true } - // match: (SETEQ 
(TESTL y (SHLL (MOVLconst [1]) x))) - // cond: !config.nacl - // result: (SETAE (BTL x y)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTL { - break - } - y := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLL { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64MOVLconst { - break - } - if v_0_1_0.AuxInt != 1 { - break - } - x := v_0_1.Args[1] - if !(!config.nacl) { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true - } // match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) // cond: !config.nacl // result: (SETAE (BTQ x y)) @@ -82866,37 +14931,6 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { v.AddArg(v0) return true } - // match: (SETEQ (TESTQ y (SHLQ (MOVQconst [1]) x))) - // cond: !config.nacl - // result: (SETAE (BTQ x y)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTQ { - break - } - y := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLQ { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64MOVQconst { - break - } - if v_0_1_0.AuxInt != 1 { - break - } - x := v_0_1.Args[1] - if !(!config.nacl) { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true - } // match: (SETEQ (TESTLconst [c] x)) // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl // result: (SETAE (BTLconst [log2(c)] x)) @@ -82961,30 +14995,6 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { v.AddArg(v0) return true } - // match: (SETEQ (TESTQ x (MOVQconst [c]))) - // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl - // result: (SETAE (BTQconst [log2(c)] x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTQ { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64MOVQconst { - break - } - c := v_0_1.AuxInt - if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { - break - } - v.reset(OpAMD64SETAE) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags) - v0.AuxInt = log2(c) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (SETEQ (InvertFlags x)) // cond: // result: (SETEQ x) @@ -83400,37 +15410,6 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { v.AddArg(v0) return true } - // match: (SETNE (TESTL y (SHLL (MOVLconst [1]) x))) - // cond: !config.nacl - // result: (SETB (BTL x y)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTL { - break - } - y := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLL { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64MOVLconst { - break - } - if v_0_1_0.AuxInt != 1 { - break - } - x := v_0_1.Args[1] - if !(!config.nacl) { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true - } // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y)) // cond: !config.nacl // result: (SETB (BTQ x y)) @@ -83462,37 +15441,6 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { v.AddArg(v0) return true } - // match: (SETNE (TESTQ y (SHLQ (MOVQconst [1]) x))) - // cond: !config.nacl - // result: (SETB (BTQ x y)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTQ { - break - } - y := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64SHLQ { - break - } - v_0_1_0 := v_0_1.Args[0] - if v_0_1_0.Op != OpAMD64MOVQconst { - break - } - if v_0_1_0.AuxInt != 1 { - break - } - x := v_0_1.Args[1] - if !(!config.nacl) { - break - } - v.reset(OpAMD64SETB) - v0 := 
b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true - } // match: (SETNE (TESTLconst [c] x)) // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl // result: (SETB (BTLconst [log2(c)] x)) @@ -83557,30 +15505,6 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { v.AddArg(v0) return true } - // match: (SETNE (TESTQ x (MOVQconst [c]))) - // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl - // result: (SETB (BTQconst [log2(c)] x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAMD64TESTQ { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpAMD64MOVQconst { - break - } - c := v_0_1.AuxInt - if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { - break - } - v.reset(OpAMD64SETB) - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags) - v0.AuxInt = log2(c) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (SETNE (InvertFlags x)) // cond: // result: (SETNE x) @@ -84464,6 +16388,23 @@ func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool { + // match: (TESTL y x:(SHLL _ _)) + // cond: y.Op != OpAMD64SHLL + // result: (TESTL x y) + for { + y := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64SHLL { + break + } + if !(y.Op != OpAMD64SHLL) { + break + } + v.reset(OpAMD64TESTL) + v.AddArg(x) + v.AddArg(y) + return true + } // match: (TESTL (MOVLconst [c]) x) // cond: // result: (TESTLconst [c] x) @@ -84497,6 +16438,23 @@ func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool { + // match: (TESTQ y x:(SHLQ _ _)) + // cond: y.Op != OpAMD64SHLQ + // result: (TESTQ x y) + for { + y := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64SHLQ { + break + } + if !(y.Op != OpAMD64SHLQ) { + break + } + v.reset(OpAMD64TESTQ) + v.AddArg(x) + v.AddArg(y) + return true + } // match: (TESTQ (MOVQconst [c]) x) // cond: is32Bit(c) // result: (TESTQconst [c] x) @@ -84763,9 +16721,9 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { v.AddArg(x) return true } - // match: (XORL (SHLLconst x [c]) (SHRLconst x [d])) - // cond: d==32-c - // result: (ROLLconst x [c]) + // match: (XORL (SHLLconst x [c]) (SHRLconst x [32-c])) + // cond: + // result: (ROLLconst x [ c]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64SHLLconst { @@ -84777,11 +16735,10 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { if v_1.Op != OpAMD64SHRLconst { break } - d := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 32-c { break } - if !(d == 32-c) { + if x != v_1.Args[0] { break } v.reset(OpAMD64ROLLconst) @@ -84789,35 +16746,34 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { v.AddArg(x) return true } - // match: (XORL (SHRLconst x [d]) (SHLLconst x [c])) - // cond: d==32-c - // result: (ROLLconst x [c]) + // match: (XORL (SHRLconst x [c]) (SHLLconst x [32-c])) + // cond: + // result: (ROLLconst x [32-c]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64SHRLconst { break } - d := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] v_1 := v.Args[1] if v_1.Op != OpAMD64SHLLconst { break } - c := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 32-c { break } - if !(d == 32-c) { + if x != v_1.Args[0] { break } v.reset(OpAMD64ROLLconst) - v.AuxInt = c + v.AuxInt = 32 - c v.AddArg(x) return true } - // match: (XORL (SHLLconst x [c]) (SHRWconst x [d])) - // cond: d==16-c && c < 16 && t.Size() == 2 - // result: (ROLWconst x [c]) + // match: (XORL (SHLLconst x [c]) (SHRWconst x [16-c])) + // cond: c < 16 && t.Size() == 2 + // result: 
(ROLWconst x [ c]) for { t := v.Type v_0 := v.Args[0] @@ -84830,11 +16786,13 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { if v_1.Op != OpAMD64SHRWconst { break } - d := v_1.AuxInt + if v_1.AuxInt != 16-c { + break + } if x != v_1.Args[0] { break } - if !(d == 16-c && c < 16 && t.Size() == 2) { + if !(c < 16 && t.Size() == 2) { break } v.reset(OpAMD64ROLWconst) @@ -84842,36 +16800,38 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { v.AddArg(x) return true } - // match: (XORL (SHRWconst x [d]) (SHLLconst x [c])) - // cond: d==16-c && c < 16 && t.Size() == 2 - // result: (ROLWconst x [c]) + // match: (XORL (SHRWconst x [c]) (SHLLconst x [16-c])) + // cond: c > 0 && t.Size() == 2 + // result: (ROLWconst x [16-c]) for { t := v.Type v_0 := v.Args[0] if v_0.Op != OpAMD64SHRWconst { break } - d := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] v_1 := v.Args[1] if v_1.Op != OpAMD64SHLLconst { break } - c := v_1.AuxInt + if v_1.AuxInt != 16-c { + break + } if x != v_1.Args[0] { break } - if !(d == 16-c && c < 16 && t.Size() == 2) { + if !(c > 0 && t.Size() == 2) { break } v.reset(OpAMD64ROLWconst) - v.AuxInt = c + v.AuxInt = 16 - c v.AddArg(x) return true } - // match: (XORL (SHLLconst x [c]) (SHRBconst x [d])) - // cond: d==8-c && c < 8 && t.Size() == 1 - // result: (ROLBconst x [c]) + // match: (XORL (SHLLconst x [c]) (SHRBconst x [ 8-c])) + // cond: c < 8 && t.Size() == 1 + // result: (ROLBconst x [ c]) for { t := v.Type v_0 := v.Args[0] @@ -84884,11 +16844,13 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { if v_1.Op != OpAMD64SHRBconst { break } - d := v_1.AuxInt + if v_1.AuxInt != 8-c { + break + } if x != v_1.Args[0] { break } - if !(d == 8-c && c < 8 && t.Size() == 1) { + if !(c < 8 && t.Size() == 1) { break } v.reset(OpAMD64ROLBconst) @@ -84896,30 +16858,32 @@ func rewriteValueAMD64_OpAMD64XORL(v *Value) bool { v.AddArg(x) return true } - // match: (XORL (SHRBconst x [d]) (SHLLconst x [c])) - // cond: d==8-c && c < 8 && t.Size() == 1 - // result: (ROLBconst x [c]) + // match: (XORL (SHRBconst x [c]) (SHLLconst x [ 8-c])) + // cond: c > 0 && t.Size() == 1 + // result: (ROLBconst x [ 8-c]) for { t := v.Type v_0 := v.Args[0] if v_0.Op != OpAMD64SHRBconst { break } - d := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] v_1 := v.Args[1] if v_1.Op != OpAMD64SHLLconst { break } - c := v_1.AuxInt + if v_1.AuxInt != 8-c { + break + } if x != v_1.Args[0] { break } - if !(d == 8-c && c < 8 && t.Size() == 1) { + if !(c > 0 && t.Size() == 1) { break } v.reset(OpAMD64ROLBconst) - v.AuxInt = c + v.AuxInt = 8 - c v.AddArg(x) return true } @@ -85069,9 +17033,9 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool { v.AddArg(x) return true } - // match: (XORQ (SHLQconst x [c]) (SHRQconst x [d])) - // cond: d==64-c - // result: (ROLQconst x [c]) + // match: (XORQ (SHLQconst x [c]) (SHRQconst x [64-c])) + // cond: + // result: (ROLQconst x [ c]) for { v_0 := v.Args[0] if v_0.Op != OpAMD64SHLQconst { @@ -85083,11 +17047,10 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool { if v_1.Op != OpAMD64SHRQconst { break } - d := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 64-c { break } - if !(d == 64-c) { + if x != v_1.Args[0] { break } v.reset(OpAMD64ROLQconst) @@ -85095,29 +17058,28 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool { v.AddArg(x) return true } - // match: (XORQ (SHRQconst x [d]) (SHLQconst x [c])) - // cond: d==64-c - // result: (ROLQconst x [c]) + // match: (XORQ (SHRQconst x [c]) (SHLQconst x [64-c])) + // cond: + // result: (ROLQconst x [64-c]) for { v_0 := 
v.Args[0] if v_0.Op != OpAMD64SHRQconst { break } - d := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] v_1 := v.Args[1] if v_1.Op != OpAMD64SHLQconst { break } - c := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 64-c { break } - if !(d == 64-c) { + if x != v_1.Args[0] { break } v.reset(OpAMD64ROLQconst) - v.AuxInt = c + v.AuxInt = 64 - c v.AddArg(x) return true } @@ -85230,7 +17192,7 @@ func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool { return false } func rewriteValueAMD64_OpAdd16(v *Value) bool { - // match: (Add16 x y) + // match: (Add16 x y) // cond: // result: (ADDL x y) for { @@ -85243,7 +17205,7 @@ func rewriteValueAMD64_OpAdd16(v *Value) bool { } } func rewriteValueAMD64_OpAdd32(v *Value) bool { - // match: (Add32 x y) + // match: (Add32 x y) // cond: // result: (ADDL x y) for { @@ -85269,7 +17231,7 @@ func rewriteValueAMD64_OpAdd32F(v *Value) bool { } } func rewriteValueAMD64_OpAdd64(v *Value) bool { - // match: (Add64 x y) + // match: (Add64 x y) // cond: // result: (ADDQ x y) for { @@ -85295,7 +17257,7 @@ func rewriteValueAMD64_OpAdd64F(v *Value) bool { } } func rewriteValueAMD64_OpAdd8(v *Value) bool { - // match: (Add8 x y) + // match: (Add8 x y) // cond: // result: (ADDL x y) for { @@ -85417,7 +17379,7 @@ func rewriteValueAMD64_OpAnd64(v *Value) bool { } } func rewriteValueAMD64_OpAnd8(v *Value) bool { - // match: (And8 x y) + // match: (And8 x y) // cond: // result: (ANDL x y) for { @@ -85862,7 +17824,7 @@ func rewriteValueAMD64_OpCom64(v *Value) bool { } } func rewriteValueAMD64_OpCom8(v *Value) bool { - // match: (Com8 x) + // match: (Com8 x) // cond: // result: (NOTL x) for { @@ -85873,7 +17835,7 @@ func rewriteValueAMD64_OpCom8(v *Value) bool { } } func rewriteValueAMD64_OpConst16(v *Value) bool { - // match: (Const16 [val]) + // match: (Const16 [val]) // cond: // result: (MOVLconst [val]) for { @@ -85884,7 +17846,7 @@ func rewriteValueAMD64_OpConst16(v *Value) bool { } } func rewriteValueAMD64_OpConst32(v *Value) bool { - // match: (Const32 [val]) + // match: (Const32 [val]) // cond: // result: (MOVLconst [val]) for { @@ -85906,7 +17868,7 @@ func rewriteValueAMD64_OpConst32F(v *Value) bool { } } func rewriteValueAMD64_OpConst64(v *Value) bool { - // match: (Const64 [val]) + // match: (Const64 [val]) // cond: // result: (MOVQconst [val]) for { @@ -85928,7 +17890,7 @@ func rewriteValueAMD64_OpConst64F(v *Value) bool { } } func rewriteValueAMD64_OpConst8(v *Value) bool { - // match: (Const8 [val]) + // match: (Const8 [val]) // cond: // result: (MOVLconst [val]) for { @@ -86197,7 +18159,7 @@ func rewriteValueAMD64_OpDiv16(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Div16 x y) + // match: (Div16 x y) // cond: // result: (Select0 (DIVW x y)) for { @@ -86235,7 +18197,7 @@ func rewriteValueAMD64_OpDiv32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Div32 x y) + // match: (Div32 x y) // cond: // result: (Select0 (DIVL x y)) for { @@ -86286,7 +18248,7 @@ func rewriteValueAMD64_OpDiv64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Div64 x y) + // match: (Div64 x y) // cond: // result: (Select0 (DIVQ x y)) for { @@ -86337,7 +18299,7 @@ func rewriteValueAMD64_OpDiv8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Div8 x y) + // match: (Div8 x y) // cond: // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) for { @@ -86360,7 +18322,7 @@ func rewriteValueAMD64_OpDiv8u(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: 
(Div8u x y) + // match: (Div8u x y) // cond: // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) for { @@ -86381,7 +18343,7 @@ func rewriteValueAMD64_OpDiv8u(v *Value) bool { func rewriteValueAMD64_OpEq16(v *Value) bool { b := v.Block _ = b - // match: (Eq16 x y) + // match: (Eq16 x y) // cond: // result: (SETEQ (CMPW x y)) for { @@ -86398,7 +18360,7 @@ func rewriteValueAMD64_OpEq16(v *Value) bool { func rewriteValueAMD64_OpEq32(v *Value) bool { b := v.Block _ = b - // match: (Eq32 x y) + // match: (Eq32 x y) // cond: // result: (SETEQ (CMPL x y)) for { @@ -86432,7 +18394,7 @@ func rewriteValueAMD64_OpEq32F(v *Value) bool { func rewriteValueAMD64_OpEq64(v *Value) bool { b := v.Block _ = b - // match: (Eq64 x y) + // match: (Eq64 x y) // cond: // result: (SETEQ (CMPQ x y)) for { @@ -86466,7 +18428,7 @@ func rewriteValueAMD64_OpEq64F(v *Value) bool { func rewriteValueAMD64_OpEq8(v *Value) bool { b := v.Block _ = b - // match: (Eq8 x y) + // match: (Eq8 x y) // cond: // result: (SETEQ (CMPB x y)) for { @@ -86483,7 +18445,7 @@ func rewriteValueAMD64_OpEq8(v *Value) bool { func rewriteValueAMD64_OpEqB(v *Value) bool { b := v.Block _ = b - // match: (EqB x y) + // match: (EqB x y) // cond: // result: (SETEQ (CMPB x y)) for { @@ -86539,7 +18501,7 @@ func rewriteValueAMD64_OpEqPtr(v *Value) bool { func rewriteValueAMD64_OpGeq16(v *Value) bool { b := v.Block _ = b - // match: (Geq16 x y) + // match: (Geq16 x y) // cond: // result: (SETGE (CMPW x y)) for { @@ -86573,7 +18535,7 @@ func rewriteValueAMD64_OpGeq16U(v *Value) bool { func rewriteValueAMD64_OpGeq32(v *Value) bool { b := v.Block _ = b - // match: (Geq32 x y) + // match: (Geq32 x y) // cond: // result: (SETGE (CMPL x y)) for { @@ -86624,7 +18586,7 @@ func rewriteValueAMD64_OpGeq32U(v *Value) bool { func rewriteValueAMD64_OpGeq64(v *Value) bool { b := v.Block _ = b - // match: (Geq64 x y) + // match: (Geq64 x y) // cond: // result: (SETGE (CMPQ x y)) for { @@ -86675,7 +18637,7 @@ func rewriteValueAMD64_OpGeq64U(v *Value) bool { func rewriteValueAMD64_OpGeq8(v *Value) bool { b := v.Block _ = b - // match: (Geq8 x y) + // match: (Geq8 x y) // cond: // result: (SETGE (CMPB x y)) for { @@ -86692,7 +18654,7 @@ func rewriteValueAMD64_OpGeq8(v *Value) bool { func rewriteValueAMD64_OpGeq8U(v *Value) bool { b := v.Block _ = b - // match: (Geq8U x y) + // match: (Geq8U x y) // cond: // result: (SETAE (CMPB x y)) for { @@ -86729,7 +18691,7 @@ func rewriteValueAMD64_OpGetG(v *Value) bool { func rewriteValueAMD64_OpGreater16(v *Value) bool { b := v.Block _ = b - // match: (Greater16 x y) + // match: (Greater16 x y) // cond: // result: (SETG (CMPW x y)) for { @@ -86763,7 +18725,7 @@ func rewriteValueAMD64_OpGreater16U(v *Value) bool { func rewriteValueAMD64_OpGreater32(v *Value) bool { b := v.Block _ = b - // match: (Greater32 x y) + // match: (Greater32 x y) // cond: // result: (SETG (CMPL x y)) for { @@ -86814,7 +18776,7 @@ func rewriteValueAMD64_OpGreater32U(v *Value) bool { func rewriteValueAMD64_OpGreater64(v *Value) bool { b := v.Block _ = b - // match: (Greater64 x y) + // match: (Greater64 x y) // cond: // result: (SETG (CMPQ x y)) for { @@ -86865,7 +18827,7 @@ func rewriteValueAMD64_OpGreater64U(v *Value) bool { func rewriteValueAMD64_OpGreater8(v *Value) bool { b := v.Block _ = b - // match: (Greater8 x y) + // match: (Greater8 x y) // cond: // result: (SETG (CMPB x y)) for { @@ -86882,7 +18844,7 @@ func rewriteValueAMD64_OpGreater8(v *Value) bool { func rewriteValueAMD64_OpGreater8U(v *Value) bool { b := v.Block _ = b - // match: 
(Greater8U x y) + // match: (Greater8U x y) // cond: // result: (SETA (CMPB x y)) for { @@ -86897,7 +18859,7 @@ func rewriteValueAMD64_OpGreater8U(v *Value) bool { } } func rewriteValueAMD64_OpHmul32(v *Value) bool { - // match: (Hmul32 x y) + // match: (Hmul32 x y) // cond: // result: (HMULL x y) for { @@ -86923,7 +18885,7 @@ func rewriteValueAMD64_OpHmul32u(v *Value) bool { } } func rewriteValueAMD64_OpHmul64(v *Value) bool { - // match: (Hmul64 x y) + // match: (Hmul64 x y) // cond: // result: (HMULQ x y) for { @@ -87049,7 +19011,7 @@ func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool { func rewriteValueAMD64_OpLeq16(v *Value) bool { b := v.Block _ = b - // match: (Leq16 x y) + // match: (Leq16 x y) // cond: // result: (SETLE (CMPW x y)) for { @@ -87083,7 +19045,7 @@ func rewriteValueAMD64_OpLeq16U(v *Value) bool { func rewriteValueAMD64_OpLeq32(v *Value) bool { b := v.Block _ = b - // match: (Leq32 x y) + // match: (Leq32 x y) // cond: // result: (SETLE (CMPL x y)) for { @@ -87134,7 +19096,7 @@ func rewriteValueAMD64_OpLeq32U(v *Value) bool { func rewriteValueAMD64_OpLeq64(v *Value) bool { b := v.Block _ = b - // match: (Leq64 x y) + // match: (Leq64 x y) // cond: // result: (SETLE (CMPQ x y)) for { @@ -87185,7 +19147,7 @@ func rewriteValueAMD64_OpLeq64U(v *Value) bool { func rewriteValueAMD64_OpLeq8(v *Value) bool { b := v.Block _ = b - // match: (Leq8 x y) + // match: (Leq8 x y) // cond: // result: (SETLE (CMPB x y)) for { @@ -87202,7 +19164,7 @@ func rewriteValueAMD64_OpLeq8(v *Value) bool { func rewriteValueAMD64_OpLeq8U(v *Value) bool { b := v.Block _ = b - // match: (Leq8U x y) + // match: (Leq8U x y) // cond: // result: (SETBE (CMPB x y)) for { @@ -87219,7 +19181,7 @@ func rewriteValueAMD64_OpLeq8U(v *Value) bool { func rewriteValueAMD64_OpLess16(v *Value) bool { b := v.Block _ = b - // match: (Less16 x y) + // match: (Less16 x y) // cond: // result: (SETL (CMPW x y)) for { @@ -87253,7 +19215,7 @@ func rewriteValueAMD64_OpLess16U(v *Value) bool { func rewriteValueAMD64_OpLess32(v *Value) bool { b := v.Block _ = b - // match: (Less32 x y) + // match: (Less32 x y) // cond: // result: (SETL (CMPL x y)) for { @@ -87304,7 +19266,7 @@ func rewriteValueAMD64_OpLess32U(v *Value) bool { func rewriteValueAMD64_OpLess64(v *Value) bool { b := v.Block _ = b - // match: (Less64 x y) + // match: (Less64 x y) // cond: // result: (SETL (CMPQ x y)) for { @@ -87355,7 +19317,7 @@ func rewriteValueAMD64_OpLess64U(v *Value) bool { func rewriteValueAMD64_OpLess8(v *Value) bool { b := v.Block _ = b - // match: (Less8 x y) + // match: (Less8 x y) // cond: // result: (SETL (CMPB x y)) for { @@ -87372,7 +19334,7 @@ func rewriteValueAMD64_OpLess8(v *Value) bool { func rewriteValueAMD64_OpLess8U(v *Value) bool { b := v.Block _ = b - // match: (Less8U x y) + // match: (Less8U x y) // cond: // result: (SETB (CMPB x y)) for { @@ -87558,7 +19520,7 @@ func rewriteValueAMD64_OpLsh16x64(v *Value) bool { func rewriteValueAMD64_OpLsh16x8(v *Value) bool { b := v.Block _ = b - // match: (Lsh16x8 x y) + // match: (Lsh16x8 x y) // cond: // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { @@ -87654,7 +19616,7 @@ func rewriteValueAMD64_OpLsh32x64(v *Value) bool { func rewriteValueAMD64_OpLsh32x8(v *Value) bool { b := v.Block _ = b - // match: (Lsh32x8 x y) + // match: (Lsh32x8 x y) // cond: // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { @@ -87750,7 +19712,7 @@ func rewriteValueAMD64_OpLsh64x64(v *Value) bool { func rewriteValueAMD64_OpLsh64x8(v *Value) bool { b := v.Block _ = 
b - // match: (Lsh64x8 x y) + // match: (Lsh64x8 x y) // cond: // result: (ANDQ (SHLQ x y) (SBBQcarrymask (CMPBconst y [64]))) for { @@ -87846,7 +19808,7 @@ func rewriteValueAMD64_OpLsh8x64(v *Value) bool { func rewriteValueAMD64_OpLsh8x8(v *Value) bool { b := v.Block _ = b - // match: (Lsh8x8 x y) + // match: (Lsh8x8 x y) // cond: // result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32]))) for { @@ -87872,7 +19834,7 @@ func rewriteValueAMD64_OpMod16(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Mod16 x y) + // match: (Mod16 x y) // cond: // result: (Select1 (DIVW x y)) for { @@ -87910,7 +19872,7 @@ func rewriteValueAMD64_OpMod32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Mod32 x y) + // match: (Mod32 x y) // cond: // result: (Select1 (DIVL x y)) for { @@ -87948,7 +19910,7 @@ func rewriteValueAMD64_OpMod64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Mod64 x y) + // match: (Mod64 x y) // cond: // result: (Select1 (DIVQ x y)) for { @@ -87986,7 +19948,7 @@ func rewriteValueAMD64_OpMod8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Mod8 x y) + // match: (Mod8 x y) // cond: // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) for { @@ -88009,7 +19971,7 @@ func rewriteValueAMD64_OpMod8u(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Mod8u x y) + // match: (Mod8u x y) // cond: // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) for { @@ -88386,7 +20348,7 @@ func rewriteValueAMD64_OpMove(v *Value) bool { return false } func rewriteValueAMD64_OpMul16(v *Value) bool { - // match: (Mul16 x y) + // match: (Mul16 x y) // cond: // result: (MULL x y) for { @@ -88399,7 +20361,7 @@ func rewriteValueAMD64_OpMul16(v *Value) bool { } } func rewriteValueAMD64_OpMul32(v *Value) bool { - // match: (Mul32 x y) + // match: (Mul32 x y) // cond: // result: (MULL x y) for { @@ -88425,7 +20387,7 @@ func rewriteValueAMD64_OpMul32F(v *Value) bool { } } func rewriteValueAMD64_OpMul64(v *Value) bool { - // match: (Mul64 x y) + // match: (Mul64 x y) // cond: // result: (MULQ x y) for { @@ -88464,7 +20426,7 @@ func rewriteValueAMD64_OpMul64uhilo(v *Value) bool { } } func rewriteValueAMD64_OpMul8(v *Value) bool { - // match: (Mul8 x y) + // match: (Mul8 x y) // cond: // result: (MULL x y) for { @@ -88477,7 +20439,7 @@ func rewriteValueAMD64_OpMul8(v *Value) bool { } } func rewriteValueAMD64_OpNeg16(v *Value) bool { - // match: (Neg16 x) + // match: (Neg16 x) // cond: // result: (NEGL x) for { @@ -88488,7 +20450,7 @@ func rewriteValueAMD64_OpNeg16(v *Value) bool { } } func rewriteValueAMD64_OpNeg32(v *Value) bool { - // match: (Neg32 x) + // match: (Neg32 x) // cond: // result: (NEGL x) for { @@ -88517,7 +20479,7 @@ func rewriteValueAMD64_OpNeg32F(v *Value) bool { } } func rewriteValueAMD64_OpNeg64(v *Value) bool { - // match: (Neg64 x) + // match: (Neg64 x) // cond: // result: (NEGQ x) for { @@ -88546,7 +20508,7 @@ func rewriteValueAMD64_OpNeg64F(v *Value) bool { } } func rewriteValueAMD64_OpNeg8(v *Value) bool { - // match: (Neg8 x) + // match: (Neg8 x) // cond: // result: (NEGL x) for { @@ -88559,7 +20521,7 @@ func rewriteValueAMD64_OpNeg8(v *Value) bool { func rewriteValueAMD64_OpNeq16(v *Value) bool { b := v.Block _ = b - // match: (Neq16 x y) + // match: (Neq16 x y) // cond: // result: (SETNE (CMPW x y)) for { @@ -88576,7 +20538,7 @@ func rewriteValueAMD64_OpNeq16(v *Value) bool { func rewriteValueAMD64_OpNeq32(v *Value) bool { b := v.Block _ = 
b - // match: (Neq32 x y) + // match: (Neq32 x y) // cond: // result: (SETNE (CMPL x y)) for { @@ -88610,7 +20572,7 @@ func rewriteValueAMD64_OpNeq32F(v *Value) bool { func rewriteValueAMD64_OpNeq64(v *Value) bool { b := v.Block _ = b - // match: (Neq64 x y) + // match: (Neq64 x y) // cond: // result: (SETNE (CMPQ x y)) for { @@ -88644,7 +20606,7 @@ func rewriteValueAMD64_OpNeq64F(v *Value) bool { func rewriteValueAMD64_OpNeq8(v *Value) bool { b := v.Block _ = b - // match: (Neq8 x y) + // match: (Neq8 x y) // cond: // result: (SETNE (CMPB x y)) for { @@ -88661,7 +20623,7 @@ func rewriteValueAMD64_OpNeq8(v *Value) bool { func rewriteValueAMD64_OpNeqB(v *Value) bool { b := v.Block _ = b - // match: (NeqB x y) + // match: (NeqB x y) // cond: // result: (SETNE (CMPB x y)) for { @@ -88832,7 +20794,7 @@ func rewriteValueAMD64_OpOr64(v *Value) bool { } } func rewriteValueAMD64_OpOr8(v *Value) bool { - // match: (Or8 x y) + // match: (Or8 x y) // cond: // result: (ORL x y) for { @@ -88956,7 +20918,7 @@ func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool { func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool { b := v.Block _ = b - // match: (Rsh16Ux8 x y) + // match: (Rsh16Ux8 x y) // cond: // result: (ANDL (SHRW x y) (SBBLcarrymask (CMPBconst y [16]))) for { @@ -89061,7 +21023,7 @@ func rewriteValueAMD64_OpRsh16x64(v *Value) bool { func rewriteValueAMD64_OpRsh16x8(v *Value) bool { b := v.Block _ = b - // match: (Rsh16x8 x y) + // match: (Rsh16x8 x y) // cond: // result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [16]))))) for { @@ -89160,7 +21122,7 @@ func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool { func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool { b := v.Block _ = b - // match: (Rsh32Ux8 x y) + // match: (Rsh32Ux8 x y) // cond: // result: (ANDL (SHRL x y) (SBBLcarrymask (CMPBconst y [32]))) for { @@ -89265,7 +21227,7 @@ func rewriteValueAMD64_OpRsh32x64(v *Value) bool { func rewriteValueAMD64_OpRsh32x8(v *Value) bool { b := v.Block _ = b - // match: (Rsh32x8 x y) + // match: (Rsh32x8 x y) // cond: // result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [32]))))) for { @@ -89364,7 +21326,7 @@ func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool { func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool { b := v.Block _ = b - // match: (Rsh64Ux8 x y) + // match: (Rsh64Ux8 x y) // cond: // result: (ANDQ (SHRQ x y) (SBBQcarrymask (CMPBconst y [64]))) for { @@ -89469,7 +21431,7 @@ func rewriteValueAMD64_OpRsh64x64(v *Value) bool { func rewriteValueAMD64_OpRsh64x8(v *Value) bool { b := v.Block _ = b - // match: (Rsh64x8 x y) + // match: (Rsh64x8 x y) // cond: // result: (SARQ x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [64]))))) for { @@ -89568,7 +21530,7 @@ func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool { func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool { b := v.Block _ = b - // match: (Rsh8Ux8 x y) + // match: (Rsh8Ux8 x y) // cond: // result: (ANDL (SHRB x y) (SBBLcarrymask (CMPBconst y [8]))) for { @@ -89673,7 +21635,7 @@ func rewriteValueAMD64_OpRsh8x64(v *Value) bool { func rewriteValueAMD64_OpRsh8x8(v *Value) bool { b := v.Block _ = b - // match: (Rsh8x8 x y) + // match: (Rsh8x8 x y) // cond: // result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [8]))))) for { @@ -89739,7 +21701,7 @@ func rewriteValueAMD64_OpSelect0(v *Value) bool { return false } func rewriteValueAMD64_OpSelect1(v *Value) bool { - // match: (Select1 (AddTupleFirst32 tuple _)) + // match: (Select1 (AddTupleFirst32 tuple _ )) // cond: // result: (Select1 tuple) for { @@ -89752,7 +21714,7 @@ func rewriteValueAMD64_OpSelect1(v 
*Value) bool { v.AddArg(tuple) return true } - // match: (Select1 (AddTupleFirst64 tuple _)) + // match: (Select1 (AddTupleFirst64 tuple _ )) // cond: // result: (Select1 tuple) for { @@ -89801,7 +21763,7 @@ func rewriteValueAMD64_OpSignExt32to64(v *Value) bool { } } func rewriteValueAMD64_OpSignExt8to16(v *Value) bool { - // match: (SignExt8to16 x) + // match: (SignExt8to16 x) // cond: // result: (MOVBQSX x) for { @@ -89812,7 +21774,7 @@ func rewriteValueAMD64_OpSignExt8to16(v *Value) bool { } } func rewriteValueAMD64_OpSignExt8to32(v *Value) bool { - // match: (SignExt8to32 x) + // match: (SignExt8to32 x) // cond: // result: (MOVBQSX x) for { @@ -89823,7 +21785,7 @@ func rewriteValueAMD64_OpSignExt8to32(v *Value) bool { } } func rewriteValueAMD64_OpSignExt8to64(v *Value) bool { - // match: (SignExt8to64 x) + // match: (SignExt8to64 x) // cond: // result: (MOVBQSX x) for { @@ -89982,7 +21944,7 @@ func rewriteValueAMD64_OpStore(v *Value) bool { return false } func rewriteValueAMD64_OpSub16(v *Value) bool { - // match: (Sub16 x y) + // match: (Sub16 x y) // cond: // result: (SUBL x y) for { @@ -89995,7 +21957,7 @@ func rewriteValueAMD64_OpSub16(v *Value) bool { } } func rewriteValueAMD64_OpSub32(v *Value) bool { - // match: (Sub32 x y) + // match: (Sub32 x y) // cond: // result: (SUBL x y) for { @@ -90021,7 +21983,7 @@ func rewriteValueAMD64_OpSub32F(v *Value) bool { } } func rewriteValueAMD64_OpSub64(v *Value) bool { - // match: (Sub64 x y) + // match: (Sub64 x y) // cond: // result: (SUBQ x y) for { @@ -90047,7 +22009,7 @@ func rewriteValueAMD64_OpSub64F(v *Value) bool { } } func rewriteValueAMD64_OpSub8(v *Value) bool { - // match: (Sub8 x y) + // match: (Sub8 x y) // cond: // result: (SUBL x y) for { @@ -90095,7 +22057,7 @@ func rewriteValueAMD64_OpSubPtr(v *Value) bool { return false } func rewriteValueAMD64_OpTrunc16to8(v *Value) bool { - // match: (Trunc16to8 x) + // match: (Trunc16to8 x) // cond: // result: x for { @@ -90119,7 +22081,7 @@ func rewriteValueAMD64_OpTrunc32to16(v *Value) bool { } } func rewriteValueAMD64_OpTrunc32to8(v *Value) bool { - // match: (Trunc32to8 x) + // match: (Trunc32to8 x) // cond: // result: x for { @@ -90155,7 +22117,7 @@ func rewriteValueAMD64_OpTrunc64to32(v *Value) bool { } } func rewriteValueAMD64_OpTrunc64to8(v *Value) bool { - // match: (Trunc64to8 x) + // match: (Trunc64to8 x) // cond: // result: x for { @@ -90206,7 +22168,7 @@ func rewriteValueAMD64_OpXor64(v *Value) bool { } } func rewriteValueAMD64_OpXor8(v *Value) bool { - // match: (Xor8 x y) + // match: (Xor8 x y) // cond: // result: (XORL x y) for { @@ -90567,7 +22529,7 @@ func rewriteValueAMD64_OpZeroExt32to64(v *Value) bool { } } func rewriteValueAMD64_OpZeroExt8to16(v *Value) bool { - // match: (ZeroExt8to16 x) + // match: (ZeroExt8to16 x) // cond: // result: (MOVBQZX x) for { @@ -90578,7 +22540,7 @@ func rewriteValueAMD64_OpZeroExt8to16(v *Value) bool { } } func rewriteValueAMD64_OpZeroExt8to32(v *Value) bool { - // match: (ZeroExt8to32 x) + // match: (ZeroExt8to32 x) // cond: // result: (MOVBQZX x) for { @@ -90589,7 +22551,7 @@ func rewriteValueAMD64_OpZeroExt8to32(v *Value) bool { } } func rewriteValueAMD64_OpZeroExt8to64(v *Value) bool { - // match: (ZeroExt8to64 x) + // match: (ZeroExt8to64 x) // cond: // result: (MOVBQZX x) for { @@ -90639,37 +22601,6 @@ func rewriteBlockAMD64(b *Block) bool { b.SetControl(v0) return true } - // match: (EQ (TESTL y (SHLL (MOVLconst [1]) x))) - // cond: !config.nacl - // result: (UGE (BTL x y)) - for { - v := b.Control - if v.Op != OpAMD64TESTL { - 
break - } - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64MOVLconst { - break - } - if v_1_0.AuxInt != 1 { - break - } - x := v_1.Args[1] - if !(!config.nacl) { - break - } - b.Kind = BlockAMD64UGE - v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.SetControl(v0) - return true - } // match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) // cond: !config.nacl // result: (UGE (BTQ x y)) @@ -90701,37 +22632,6 @@ func rewriteBlockAMD64(b *Block) bool { b.SetControl(v0) return true } - // match: (EQ (TESTQ y (SHLQ (MOVQconst [1]) x))) - // cond: !config.nacl - // result: (UGE (BTQ x y)) - for { - v := b.Control - if v.Op != OpAMD64TESTQ { - break - } - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQ { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64MOVQconst { - break - } - if v_1_0.AuxInt != 1 { - break - } - x := v_1.Args[1] - if !(!config.nacl) { - break - } - b.Kind = BlockAMD64UGE - v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.SetControl(v0) - return true - } // match: (EQ (TESTLconst [c] x)) // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl // result: (UGE (BTLconst [log2(c)] x)) @@ -90796,30 +22696,6 @@ func rewriteBlockAMD64(b *Block) bool { b.SetControl(v0) return true } - // match: (EQ (TESTQ x (MOVQconst [c]))) - // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl - // result: (UGE (BTQconst [log2(c)] x)) - for { - v := b.Control - if v.Op != OpAMD64TESTQ { - break - } - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { - break - } - b.Kind = BlockAMD64UGE - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags) - v0.AuxInt = log2(c) - v0.AddArg(x) - b.SetControl(v0) - return true - } // match: (EQ (InvertFlags cmp) yes no) // cond: // result: (EQ cmp yes no) @@ -91123,7 +22999,7 @@ func rewriteBlockAMD64(b *Block) bool { return true } case BlockIf: - // match: (If (SETL cmp) yes no) + // match: (If (SETL cmp) yes no) // cond: // result: (LT cmp yes no) for { @@ -91157,7 +23033,7 @@ func rewriteBlockAMD64(b *Block) bool { _ = no return true } - // match: (If (SETG cmp) yes no) + // match: (If (SETG cmp) yes no) // cond: // result: (GT cmp yes no) for { @@ -91225,7 +23101,7 @@ func rewriteBlockAMD64(b *Block) bool { _ = no return true } - // match: (If (SETB cmp) yes no) + // match: (If (SETB cmp) yes no) // cond: // result: (ULT cmp yes no) for { @@ -91259,7 +23135,7 @@ func rewriteBlockAMD64(b *Block) bool { _ = no return true } - // match: (If (SETA cmp) yes no) + // match: (If (SETA cmp) yes no) // cond: // result: (UGT cmp yes no) for { @@ -91293,7 +23169,7 @@ func rewriteBlockAMD64(b *Block) bool { _ = no return true } - // match: (If (SETGF cmp) yes no) + // match: (If (SETGF cmp) yes no) // cond: // result: (UGT cmp yes no) for { @@ -91581,35 +23457,7 @@ func rewriteBlockAMD64(b *Block) bool { return true } case BlockAMD64NE: - // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) - // cond: - // result: (LT cmp yes no) - for { - v := b.Control - if v.Op != OpAMD64TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETL { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SETL { - break - } - if cmp != v_1.Args[0] { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = BlockAMD64LT - b.SetControl(cmp) - _ = yes - _ = no - return true - } - // match: (NE (TESTB (SETL 
cmp) (SETL cmp)) yes no) + // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) // cond: // result: (LT cmp yes no) for { @@ -91665,63 +23513,7 @@ func rewriteBlockAMD64(b *Block) bool { _ = no return true } - // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) - // cond: - // result: (LE cmp yes no) - for { - v := b.Control - if v.Op != OpAMD64TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETLE { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SETLE { - break - } - if cmp != v_1.Args[0] { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = BlockAMD64LE - b.SetControl(cmp) - _ = yes - _ = no - return true - } - // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) - // cond: - // result: (GT cmp yes no) - for { - v := b.Control - if v.Op != OpAMD64TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETG { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SETG { - break - } - if cmp != v_1.Args[0] { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = BlockAMD64GT - b.SetControl(cmp) - _ = yes - _ = no - return true - } - // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) + // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) // cond: // result: (GT cmp yes no) for { @@ -91777,62 +23569,6 @@ func rewriteBlockAMD64(b *Block) bool { _ = no return true } - // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) - // cond: - // result: (GE cmp yes no) - for { - v := b.Control - if v.Op != OpAMD64TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETGE { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SETGE { - break - } - if cmp != v_1.Args[0] { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = BlockAMD64GE - b.SetControl(cmp) - _ = yes - _ = no - return true - } - // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) - // cond: - // result: (EQ cmp yes no) - for { - v := b.Control - if v.Op != OpAMD64TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETEQ { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SETEQ { - break - } - if cmp != v_1.Args[0] { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = BlockAMD64EQ - b.SetControl(cmp) - _ = yes - _ = no - return true - } // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) // cond: // result: (EQ cmp yes no) @@ -91889,63 +23625,7 @@ func rewriteBlockAMD64(b *Block) bool { _ = no return true } - // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) - // cond: - // result: (NE cmp yes no) - for { - v := b.Control - if v.Op != OpAMD64TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETNE { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SETNE { - break - } - if cmp != v_1.Args[0] { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = BlockAMD64NE - b.SetControl(cmp) - _ = yes - _ = no - return true - } - // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) - // cond: - // result: (ULT cmp yes no) - for { - v := b.Control - if v.Op != OpAMD64TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETB { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SETB { - break - } - if cmp != v_1.Args[0] { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = BlockAMD64ULT - b.SetControl(cmp) - _ = yes - _ = no - return true - } - // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) + // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) // cond: // result: (ULT cmp yes no) for { @@ -92001,63 
+23681,7 @@ func rewriteBlockAMD64(b *Block) bool { _ = no return true } - // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) - // cond: - // result: (ULE cmp yes no) - for { - v := b.Control - if v.Op != OpAMD64TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETBE { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SETBE { - break - } - if cmp != v_1.Args[0] { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = BlockAMD64ULE - b.SetControl(cmp) - _ = yes - _ = no - return true - } - // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) - // cond: - // result: (UGT cmp yes no) - for { - v := b.Control - if v.Op != OpAMD64TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETA { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SETA { - break - } - if cmp != v_1.Args[0] { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = BlockAMD64UGT - b.SetControl(cmp) - _ = yes - _ = no - return true - } - // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) + // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) // cond: // result: (UGT cmp yes no) for { @@ -92113,34 +23737,6 @@ func rewriteBlockAMD64(b *Block) bool { _ = no return true } - // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) - // cond: - // result: (UGE cmp yes no) - for { - v := b.Control - if v.Op != OpAMD64TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETAE { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SETAE { - break - } - if cmp != v_1.Args[0] { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = BlockAMD64UGE - b.SetControl(cmp) - _ = yes - _ = no - return true - } // match: (NE (TESTL (SHLL (MOVLconst [1]) x) y)) // cond: !config.nacl // result: (ULT (BTL x y)) @@ -92172,37 +23768,6 @@ func rewriteBlockAMD64(b *Block) bool { b.SetControl(v0) return true } - // match: (NE (TESTL y (SHLL (MOVLconst [1]) x))) - // cond: !config.nacl - // result: (ULT (BTL x y)) - for { - v := b.Control - if v.Op != OpAMD64TESTL { - break - } - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLL { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64MOVLconst { - break - } - if v_1_0.AuxInt != 1 { - break - } - x := v_1.Args[1] - if !(!config.nacl) { - break - } - b.Kind = BlockAMD64ULT - v0 := b.NewValue0(v.Pos, OpAMD64BTL, TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.SetControl(v0) - return true - } // match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y)) // cond: !config.nacl // result: (ULT (BTQ x y)) @@ -92234,37 +23799,6 @@ func rewriteBlockAMD64(b *Block) bool { b.SetControl(v0) return true } - // match: (NE (TESTQ y (SHLQ (MOVQconst [1]) x))) - // cond: !config.nacl - // result: (ULT (BTQ x y)) - for { - v := b.Control - if v.Op != OpAMD64TESTQ { - break - } - y := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SHLQ { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpAMD64MOVQconst { - break - } - if v_1_0.AuxInt != 1 { - break - } - x := v_1.Args[1] - if !(!config.nacl) { - break - } - b.Kind = BlockAMD64ULT - v0 := b.NewValue0(v.Pos, OpAMD64BTQ, TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - b.SetControl(v0) - return true - } // match: (NE (TESTLconst [c] x)) // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl // result: (ULT (BTLconst [log2(c)] x)) @@ -92329,31 +23863,7 @@ func rewriteBlockAMD64(b *Block) bool { b.SetControl(v0) return true } - // match: (NE (TESTQ x (MOVQconst [c]))) - // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl - // result: (ULT (BTQconst 
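The EQ and NE block rules in this stretch of rewriteAMD64.go recognize a single-bit test, y & (1<<x) or y & c with c a power of two, and turn it into a bit-test instruction (BTL/BTQ, or BTLconst/BTQconst with the bit index log2(c)) followed by an unsigned branch: UGE when the original comparison was an equal-to-zero test (EQ), ULT when it was a not-equal test (NE), since the bit-test instruction leaves the selected bit in the carry flag. The identity the rewrite depends on is easy to check outside the compiler; the sketch below is standalone illustration, not compiler code, and its helper names are invented.

package main

import "fmt"

// bitTestShift is the form the TESTL/TESTQ-with-SHL rules match:
// the low bit shifted up to position x and ANDed against y.
func bitTestShift(y uint64, x uint) bool {
	return y&(1<<x) != 0
}

// bitTestConst is the form the TESTLconst/TESTQconst rules match:
// an AND against a single-bit constant mask c.
func bitTestConst(y, c uint64) bool {
	return y&c != 0
}

// bitIsSet reads the bit directly, which is what BT exposes via the carry flag.
func bitIsSet(y uint64, x uint) bool {
	return (y>>x)&1 == 1
}

func main() {
	y := uint64(10) // binary 1010: bits 1 and 3 set
	fmt.Println(bitTestShift(y, 3), bitIsSet(y, 3))    // true true
	fmt.Println(bitTestShift(y, 2), bitIsSet(y, 2))    // false false
	fmt.Println(bitTestConst(y, 1<<3), bitIsSet(y, 3)) // true true
}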
[log2(c)] x)) - for { - v := b.Control - if v.Op != OpAMD64TESTQ { - break - } - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64MOVQconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { - break - } - b.Kind = BlockAMD64ULT - v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, TypeFlags) - v0.AuxInt = log2(c) - v0.AddArg(x) - b.SetControl(v0) - return true - } - // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) + // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) // cond: // result: (UGT cmp yes no) for { @@ -92381,62 +23891,6 @@ func rewriteBlockAMD64(b *Block) bool { _ = no return true } - // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) - // cond: - // result: (UGT cmp yes no) - for { - v := b.Control - if v.Op != OpAMD64TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETGF { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SETGF { - break - } - if cmp != v_1.Args[0] { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = BlockAMD64UGT - b.SetControl(cmp) - _ = yes - _ = no - return true - } - // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) - // cond: - // result: (UGE cmp yes no) - for { - v := b.Control - if v.Op != OpAMD64TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETGEF { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SETGEF { - break - } - if cmp != v_1.Args[0] { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = BlockAMD64UGE - b.SetControl(cmp) - _ = yes - _ = no - return true - } // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) // cond: // result: (UGE cmp yes no) @@ -92493,62 +23947,6 @@ func rewriteBlockAMD64(b *Block) bool { _ = no return true } - // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) - // cond: - // result: (EQF cmp yes no) - for { - v := b.Control - if v.Op != OpAMD64TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETEQF { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SETEQF { - break - } - if cmp != v_1.Args[0] { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = BlockAMD64EQF - b.SetControl(cmp) - _ = yes - _ = no - return true - } - // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) - // cond: - // result: (NEF cmp yes no) - for { - v := b.Control - if v.Op != OpAMD64TESTB { - break - } - v_0 := v.Args[0] - if v_0.Op != OpAMD64SETNEF { - break - } - cmp := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAMD64SETNEF { - break - } - if cmp != v_1.Args[0] { - break - } - yes := b.Succs[0] - no := b.Succs[1] - b.Kind = BlockAMD64NEF - b.SetControl(cmp) - _ = yes - _ = no - return true - } // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) // cond: // result: (NEF cmp yes no) diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index 25b6988383..0b554d79a4 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -758,40 +758,6 @@ func rewriteValueARM_OpARMADC(v *Value) bool { v.AddArg(flags) return true } - // match: (ADC x (MOVWconst [c]) flags) - // cond: - // result: (ADCconst [c] x flags) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - c := v_1.AuxInt - flags := v.Args[2] - v.reset(OpARMADCconst) - v.AuxInt = c - v.AddArg(x) - v.AddArg(flags) - return true - } - // match: (ADC (MOVWconst [c]) x flags) - // cond: - // result: (ADCconst [c] x flags) - for { - v_0 := v.Args[0] - if v_0.Op != 
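The blocks dropped in this ARM hunk appear to duplicate, in one argument order or the other, matchers that survive just above them; what remains is one explicitly written rule per ordering, which is why the commutative ops further down (ADD, ADDS, AND, OR, XOR with a MOVWconst operand) each appear as a pair of near-identical matchers. A minimal sketch of that two-ordering shape, using a toy Expr type instead of the real *Value; Expr, foldAddConst, and the operator strings are assumptions made for the illustration.

package main

import "fmt"

// Expr is a toy stand-in for the compiler's *Value: an operator, an integer
// auxiliary, and the argument list.
type Expr struct {
	Op   string
	Aux  int64
	Args []*Expr
}

// foldAddConst shows the two-ordering shape: with no automatic commuting,
// a commutative ADD with a constant operand needs one matcher per argument
// position before it can be rewritten to ADDconst.
func foldAddConst(v *Expr) bool {
	// match: (ADD x (const [c])) -> (ADDconst [c] x)
	if v.Op == "ADD" && v.Args[1].Op == "const" {
		c, x := v.Args[1].Aux, v.Args[0]
		*v = Expr{Op: "ADDconst", Aux: c, Args: []*Expr{x}}
		return true
	}
	// match: (ADD (const [c]) x) -> (ADDconst [c] x)
	if v.Op == "ADD" && v.Args[0].Op == "const" {
		c, x := v.Args[0].Aux, v.Args[1]
		*v = Expr{Op: "ADDconst", Aux: c, Args: []*Expr{x}}
		return true
	}
	return false
}

func main() {
	x := &Expr{Op: "reg"}
	v := &Expr{Op: "ADD", Args: []*Expr{{Op: "const", Aux: 4}, x}}
	fmt.Println(foldAddConst(v), v.Op, v.Aux) // true ADDconst 4
}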
OpARMMOVWconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - flags := v.Args[2] - v.reset(OpARMADCconst) - v.AuxInt = c - v.AddArg(x) - v.AddArg(flags) - return true - } // match: (ADC x (SLLconst [c] y) flags) // cond: // result: (ADCshiftLL x y [c] flags) @@ -830,44 +796,6 @@ func rewriteValueARM_OpARMADC(v *Value) bool { v.AddArg(flags) return true } - // match: (ADC (SLLconst [c] y) x flags) - // cond: - // result: (ADCshiftLL x y [c] flags) - for { - v_0 := v.Args[0] - if v_0.Op != OpARMSLLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - x := v.Args[1] - flags := v.Args[2] - v.reset(OpARMADCshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) - return true - } - // match: (ADC x (SLLconst [c] y) flags) - // cond: - // result: (ADCshiftLL x y [c] flags) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLLconst { - break - } - c := v_1.AuxInt - y := v_1.Args[0] - flags := v.Args[2] - v.reset(OpARMADCshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) - return true - } // match: (ADC x (SRLconst [c] y) flags) // cond: // result: (ADCshiftRL x y [c] flags) @@ -906,44 +834,6 @@ func rewriteValueARM_OpARMADC(v *Value) bool { v.AddArg(flags) return true } - // match: (ADC (SRLconst [c] y) x flags) - // cond: - // result: (ADCshiftRL x y [c] flags) - for { - v_0 := v.Args[0] - if v_0.Op != OpARMSRLconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - x := v.Args[1] - flags := v.Args[2] - v.reset(OpARMADCshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) - return true - } - // match: (ADC x (SRLconst [c] y) flags) - // cond: - // result: (ADCshiftRL x y [c] flags) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRLconst { - break - } - c := v_1.AuxInt - y := v_1.Args[0] - flags := v.Args[2] - v.reset(OpARMADCshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) - return true - } // match: (ADC x (SRAconst [c] y) flags) // cond: // result: (ADCshiftRA x y [c] flags) @@ -982,44 +872,6 @@ func rewriteValueARM_OpARMADC(v *Value) bool { v.AddArg(flags) return true } - // match: (ADC (SRAconst [c] y) x flags) - // cond: - // result: (ADCshiftRA x y [c] flags) - for { - v_0 := v.Args[0] - if v_0.Op != OpARMSRAconst { - break - } - c := v_0.AuxInt - y := v_0.Args[0] - x := v.Args[1] - flags := v.Args[2] - v.reset(OpARMADCshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) - return true - } - // match: (ADC x (SRAconst [c] y) flags) - // cond: - // result: (ADCshiftRA x y [c] flags) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRAconst { - break - } - c := v_1.AuxInt - y := v_1.Args[0] - flags := v.Args[2] - v.reset(OpARMADCshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - v.AddArg(flags) - return true - } // match: (ADC x (SLL y z) flags) // cond: // result: (ADCshiftLLreg x y z flags) @@ -1058,44 +910,6 @@ func rewriteValueARM_OpARMADC(v *Value) bool { v.AddArg(flags) return true } - // match: (ADC (SLL y z) x flags) - // cond: - // result: (ADCshiftLLreg x y z flags) - for { - v_0 := v.Args[0] - if v_0.Op != OpARMSLL { - break - } - y := v_0.Args[0] - z := v_0.Args[1] - x := v.Args[1] - flags := v.Args[2] - v.reset(OpARMADCshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) - return true - } - // match: (ADC x (SLL y z) flags) - // cond: - // result: (ADCshiftLLreg x y z flags) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSLL { - break - } - y := v_1.Args[0] - z := v_1.Args[1] - flags := v.Args[2] - 
v.reset(OpARMADCshiftLLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) - return true - } // match: (ADC x (SRL y z) flags) // cond: // result: (ADCshiftRLreg x y z flags) @@ -1134,44 +948,6 @@ func rewriteValueARM_OpARMADC(v *Value) bool { v.AddArg(flags) return true } - // match: (ADC (SRL y z) x flags) - // cond: - // result: (ADCshiftRLreg x y z flags) - for { - v_0 := v.Args[0] - if v_0.Op != OpARMSRL { - break - } - y := v_0.Args[0] - z := v_0.Args[1] - x := v.Args[1] - flags := v.Args[2] - v.reset(OpARMADCshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) - return true - } - // match: (ADC x (SRL y z) flags) - // cond: - // result: (ADCshiftRLreg x y z flags) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRL { - break - } - y := v_1.Args[0] - z := v_1.Args[1] - flags := v.Args[2] - v.reset(OpARMADCshiftRLreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) - return true - } // match: (ADC x (SRA y z) flags) // cond: // result: (ADCshiftRAreg x y z flags) @@ -1210,44 +986,6 @@ func rewriteValueARM_OpARMADC(v *Value) bool { v.AddArg(flags) return true } - // match: (ADC (SRA y z) x flags) - // cond: - // result: (ADCshiftRAreg x y z flags) - for { - v_0 := v.Args[0] - if v_0.Op != OpARMSRA { - break - } - y := v_0.Args[0] - z := v_0.Args[1] - x := v.Args[1] - flags := v.Args[2] - v.reset(OpARMADCshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) - return true - } - // match: (ADC x (SRA y z) flags) - // cond: - // result: (ADCshiftRAreg x y z flags) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMSRA { - break - } - y := v_1.Args[0] - z := v_1.Args[1] - flags := v.Args[2] - v.reset(OpARMADCshiftRAreg) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - v.AddArg(flags) - return true - } return false } func rewriteValueARM_OpARMADCconst(v *Value) bool { @@ -1559,31 +1297,31 @@ func rewriteValueARM_OpARMADCshiftRLreg(v *Value) bool { func rewriteValueARM_OpARMADD(v *Value) bool { b := v.Block _ = b - // match: (ADD x (MOVWconst [c])) + // match: (ADD (MOVWconst [c]) x) // cond: // result: (ADDconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := v.Args[1] v.reset(OpARMADDconst) v.AuxInt = c v.AddArg(x) return true } - // match: (ADD (MOVWconst [c]) x) + // match: (ADD x (MOVWconst [c])) // cond: // result: (ADDconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt v.reset(OpARMADDconst) v.AuxInt = c v.AddArg(x) @@ -1854,31 +1592,6 @@ func rewriteValueARM_OpARMADD(v *Value) bool { v.AddArg(v0) return true } - // match: (ADD (RSBconst [d] y) (RSBconst [c] x)) - // cond: - // result: (RSBconst [c+d] (ADD x y)) - for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != OpARMRSBconst { - break - } - d := v_0.AuxInt - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMRSBconst { - break - } - c := v_1.AuxInt - x := v_1.Args[0] - v.reset(OpARMRSBconst) - v.AuxInt = c + d - v0 := b.NewValue0(v.Pos, OpARMADD, t) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true - } // match: (ADD (MUL x y) a) // cond: // result: (MULA x y a) @@ -1916,31 +1629,31 @@ func rewriteValueARM_OpARMADD(v *Value) bool { return false } func rewriteValueARM_OpARMADDS(v *Value) bool { - // match: (ADDS x (MOVWconst [c])) + // match: (ADDS 
(MOVWconst [c]) x) // cond: // result: (ADDSconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := v.Args[1] v.reset(OpARMADDSconst) v.AuxInt = c v.AddArg(x) return true } - // match: (ADDS (MOVWconst [c]) x) + // match: (ADDS x (MOVWconst [c])) // cond: // result: (ADDSconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt v.reset(OpARMADDSconst) v.AuxInt = c v.AddArg(x) @@ -2777,31 +2490,31 @@ func rewriteValueARM_OpARMADDshiftRLreg(v *Value) bool { return false } func rewriteValueARM_OpARMAND(v *Value) bool { - // match: (AND x (MOVWconst [c])) + // match: (AND (MOVWconst [c]) x) // cond: // result: (ANDconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := v.Args[1] v.reset(OpARMANDconst) v.AuxInt = c v.AddArg(x) return true } - // match: (AND (MOVWconst [c]) x) + // match: (AND x (MOVWconst [c])) // cond: // result: (ANDconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt v.reset(OpARMANDconst) v.AuxInt = c v.AddArg(x) @@ -7409,9 +7122,185 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { if !(int32(c) == -1) { break } - v.reset(OpARMRSBconst) - v.AuxInt = 0 - v.AddArg(x) + v.reset(OpARMRSBconst) + v.AuxInt = 0 + v.AddArg(x) + return true + } + // match: (MUL _ (MOVWconst [0])) + // cond: + // result: (MOVWconst [0]) + for { + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + if v_1.AuxInt != 0 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true + } + // match: (MUL x (MOVWconst [1])) + // cond: + // result: x + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + if v_1.AuxInt != 1 { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MUL x (MOVWconst [c])) + // cond: isPowerOfTwo(c) + // result: (SLLconst [log2(c)] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + if !(isPowerOfTwo(c)) { + break + } + v.reset(OpARMSLLconst) + v.AuxInt = log2(c) + v.AddArg(x) + return true + } + // match: (MUL x (MOVWconst [c])) + // cond: isPowerOfTwo(c-1) && int32(c) >= 3 + // result: (ADDshiftLL x x [log2(c-1)]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + if !(isPowerOfTwo(c-1) && int32(c) >= 3) { + break + } + v.reset(OpARMADDshiftLL) + v.AuxInt = log2(c - 1) + v.AddArg(x) + v.AddArg(x) + return true + } + // match: (MUL x (MOVWconst [c])) + // cond: isPowerOfTwo(c+1) && int32(c) >= 7 + // result: (RSBshiftLL x x [log2(c+1)]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + if !(isPowerOfTwo(c+1) && int32(c) >= 7) { + break + } + v.reset(OpARMRSBshiftLL) + v.AuxInt = log2(c + 1) + v.AddArg(x) + v.AddArg(x) + return true + } + // match: (MUL x (MOVWconst [c])) + // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) + // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := 
v_1.AuxInt + if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { + break + } + v.reset(OpARMSLLconst) + v.AuxInt = log2(c / 3) + v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v0.AuxInt = 1 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MUL x (MOVWconst [c])) + // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) + // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { + break + } + v.reset(OpARMSLLconst) + v.AuxInt = log2(c / 5) + v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MUL x (MOVWconst [c])) + // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) + // result: (SLLconst [log2(c/7)] (RSBshiftLL x x [3])) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { + break + } + v.reset(OpARMSLLconst) + v.AuxInt = log2(c / 7) + v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MUL x (MOVWconst [c])) + // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) + // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { + break + } + v.reset(OpARMSLLconst) + v.AuxInt = log2(c / 9) + v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } // match: (MUL (MOVWconst [c]) x) @@ -7432,21 +7321,6 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { v.AddArg(x) return true } - // match: (MUL _ (MOVWconst [0])) - // cond: - // result: (MOVWconst [0]) - for { - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpARMMOVWconst) - v.AuxInt = 0 - return true - } // match: (MUL (MOVWconst [0]) _) // cond: // result: (MOVWconst [0]) @@ -7462,23 +7336,6 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { v.AuxInt = 0 return true } - // match: (MUL x (MOVWconst [1])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - if v_1.AuxInt != 1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (MUL (MOVWconst [1]) x) // cond: // result: x @@ -7496,24 +7353,6 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { v.AddArg(x) return true } - // match: (MUL x (MOVWconst [c])) - // cond: isPowerOfTwo(c) - // result: (SLLconst [log2(c)] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpARMSLLconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true - } // match: (MUL (MOVWconst [c]) x) // cond: isPowerOfTwo(c) // result: (SLLconst [log2(c)] x) @@ -7532,25 +7371,6 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { v.AddArg(x) return true } - // match: (MUL x (MOVWconst [c])) - // cond: isPowerOfTwo(c-1) && int32(c) >= 3 - // result: (ADDshiftLL x x [log2(c-1)]) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c-1) && int32(c) >= 3) { - break - } - v.reset(OpARMADDshiftLL) - v.AuxInt = log2(c - 1) - 
v.AddArg(x) - v.AddArg(x) - return true - } // match: (MUL (MOVWconst [c]) x) // cond: isPowerOfTwo(c-1) && int32(c) >= 3 // result: (ADDshiftLL x x [log2(c-1)]) @@ -7570,25 +7390,6 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { v.AddArg(x) return true } - // match: (MUL x (MOVWconst [c])) - // cond: isPowerOfTwo(c+1) && int32(c) >= 7 - // result: (RSBshiftLL x x [log2(c+1)]) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c+1) && int32(c) >= 7) { - break - } - v.reset(OpARMRSBshiftLL) - v.AuxInt = log2(c + 1) - v.AddArg(x) - v.AddArg(x) - return true - } // match: (MUL (MOVWconst [c]) x) // cond: isPowerOfTwo(c+1) && int32(c) >= 7 // result: (RSBshiftLL x x [log2(c+1)]) @@ -7608,28 +7409,6 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { v.AddArg(x) return true } - // match: (MUL x (MOVWconst [c])) - // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) - // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - c := v_1.AuxInt - if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { - break - } - v.reset(OpARMSLLconst) - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) - v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (MUL (MOVWconst [c]) x) // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) @@ -7652,28 +7431,6 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { v.AddArg(v0) return true } - // match: (MUL x (MOVWconst [c])) - // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) - // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - c := v_1.AuxInt - if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { - break - } - v.reset(OpARMSLLconst) - v.AuxInt = log2(c / 5) - v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (MUL (MOVWconst [c]) x) // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) @@ -7696,28 +7453,6 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { v.AddArg(v0) return true } - // match: (MUL x (MOVWconst [c])) - // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) - // result: (SLLconst [log2(c/7)] (RSBshiftLL x x [3])) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - c := v_1.AuxInt - if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { - break - } - v.reset(OpARMSLLconst) - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (MUL (MOVWconst [c]) x) // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) // result: (SLLconst [log2(c/7)] (RSBshiftLL x x [3])) @@ -7740,28 +7475,6 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { v.AddArg(v0) return true } - // match: (MUL x (MOVWconst [c])) - // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) - // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - c := v_1.AuxInt - if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { - break - } - v.reset(OpARMSLLconst) - v.AuxInt = log2(c / 9) - v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - 
v.AddArg(v0) - return true - } // match: (MUL (MOVWconst [c]) x) // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) @@ -7802,24 +7515,6 @@ func rewriteValueARM_OpARMMUL(v *Value) bool { v.AuxInt = int64(int32(c * d)) return true } - // match: (MUL (MOVWconst [d]) (MOVWconst [c])) - // cond: - // result: (MOVWconst [int64(int32(c*d))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpARMMOVWconst) - v.AuxInt = int64(int32(c * d)) - return true - } return false } func rewriteValueARM_OpARMMULA(v *Value) bool { @@ -8587,31 +8282,31 @@ func rewriteValueARM_OpARMNotEqual(v *Value) bool { return false } func rewriteValueARM_OpARMOR(v *Value) bool { - // match: (OR x (MOVWconst [c])) + // match: (OR (MOVWconst [c]) x) // cond: // result: (ORconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := v.Args[1] v.reset(OpARMORconst) v.AuxInt = c v.AddArg(x) return true } - // match: (OR (MOVWconst [c]) x) + // match: (OR x (MOVWconst [c])) // cond: // result: (ORconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt v.reset(OpARMORconst) v.AuxInt = c v.AddArg(x) @@ -8932,7 +8627,7 @@ func rewriteValueARM_OpARMORshiftLL(v *Value) bool { v.AddArg(x) return true } - // match: (ORshiftLL [c] (SRLconst x [32-c]) x) + // match: ( ORshiftLL [c] (SRLconst x [32-c]) x) // cond: // result: (SRRconst [32-c] x) for { @@ -9159,7 +8854,7 @@ func rewriteValueARM_OpARMORshiftRL(v *Value) bool { v.AddArg(x) return true } - // match: (ORshiftRL [c] (SLLconst x [32-c]) x) + // match: ( ORshiftRL [c] (SLLconst x [32-c]) x) // cond: // result: (SRRconst [ c] x) for { @@ -11415,6 +11110,21 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { return false } func rewriteValueARM_OpARMSUBS(v *Value) bool { + // match: (SUBS (MOVWconst [c]) x) + // cond: + // result: (RSBSconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMRSBSconst) + v.AuxInt = c + v.AddArg(x) + return true + } // match: (SUBS x (MOVWconst [c])) // cond: // result: (SUBSconst [c] x) @@ -12264,31 +11974,31 @@ func rewriteValueARM_OpARMSUBshiftRLreg(v *Value) bool { return false } func rewriteValueARM_OpARMXOR(v *Value) bool { - // match: (XOR x (MOVWconst [c])) + // match: (XOR (MOVWconst [c]) x) // cond: // result: (XORconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARMMOVWconst { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := v.Args[1] v.reset(OpARMXORconst) v.AuxInt = c v.AddArg(x) return true } - // match: (XOR (MOVWconst [c]) x) + // match: (XOR x (MOVWconst [c])) // cond: // result: (XORconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt v.reset(OpARMXORconst) v.AuxInt = c v.AddArg(x) @@ -14776,7 +14486,7 @@ func rewriteValueARM_OpLsh16x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh16x8 x y) + // match: (Lsh16x8 x y) // cond: // result: (SLL x (ZeroExt8to32 y)) 
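The MUL rules in these ARM hunks (and the ARM64 MUL/MULW rules that follow) replace multiplication by a small constant with shift-and-add forms: a power of two becomes a single left shift, c = 2^k+1 becomes ADDshiftLL x x [k] (x + x<<k), c = 2^k-1 becomes RSBshiftLL x x [k] (x<<k - x), and multiples of 3, 5, 7, and 9 combine one of those with an outer SLLconst. The identities are easy to verify outside the compiler; the sketch below is standalone illustration, not compiler code, and its helper names are invented.

package main

import "fmt"

// Each helper spells out the shift-and-add identity behind one family of
// MUL-by-constant rules.
func mulPow2(x int32, k uint) int32   { return x << k }     // c == 2^k     -> SLLconst [k] x
func mulPow2p1(x int32, k uint) int32 { return x + x<<k }   // c == 2^k + 1 -> ADDshiftLL x x [k]
func mulPow2m1(x int32, k uint) int32 { return x<<k - x }   // c == 2^k - 1 -> RSBshiftLL x x [k]

// c == 9 * 2^k -> SLLconst [k] (ADDshiftLL x x [3])
func mulNineShift(x int32, k uint) int32 { return (x + x<<3) << k }

func main() {
	x := int32(1234)
	fmt.Println(mulPow2(x, 4) == x*16)      // true
	fmt.Println(mulPow2p1(x, 3) == x*9)     // true
	fmt.Println(mulPow2m1(x, 3) == x*7)     // true
	fmt.Println(mulNineShift(x, 2) == x*36) // true
}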
for { @@ -14882,7 +14592,7 @@ func rewriteValueARM_OpLsh32x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh32x8 x y) + // match: (Lsh32x8 x y) // cond: // result: (SLL x (ZeroExt8to32 y)) for { @@ -14988,7 +14698,7 @@ func rewriteValueARM_OpLsh8x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh8x8 x y) + // match: (Lsh8x8 x y) // cond: // result: (SLL x (ZeroExt8to32 y)) for { @@ -15929,7 +15639,7 @@ func rewriteValueARM_OpRsh16Ux8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh16Ux8 x y) + // match: (Rsh16Ux8 x y) // cond: // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y)) for { @@ -16049,7 +15759,7 @@ func rewriteValueARM_OpRsh16x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh16x8 x y) + // match: (Rsh16x8 x y) // cond: // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y)) for { @@ -16157,7 +15867,7 @@ func rewriteValueARM_OpRsh32Ux8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh32Ux8 x y) + // match: (Rsh32Ux8 x y) // cond: // result: (SRL x (ZeroExt8to32 y)) for { @@ -16259,7 +15969,7 @@ func rewriteValueARM_OpRsh32x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh32x8 x y) + // match: (Rsh32x8 x y) // cond: // result: (SRA x (ZeroExt8to32 y)) for { @@ -16378,7 +16088,7 @@ func rewriteValueARM_OpRsh8Ux8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh8Ux8 x y) + // match: (Rsh8Ux8 x y) // cond: // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y)) for { @@ -16498,7 +16208,7 @@ func rewriteValueARM_OpRsh8x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh8x8 x y) + // match: (Rsh8x8 x y) // cond: // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y)) for { diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index 57c5b04245..009e36b90f 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -714,31 +714,31 @@ func rewriteValueARM64(v *Value) bool { return false } func rewriteValueARM64_OpARM64ADD(v *Value) bool { - // match: (ADD x (MOVDconst [c])) + // match: (ADD (MOVDconst [c]) x) // cond: // result: (ADDconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := v.Args[1] v.reset(OpARM64ADDconst) v.AuxInt = c v.AddArg(x) return true } - // match: (ADD (MOVDconst [c]) x) + // match: (ADD x (MOVDconst [c])) // cond: // result: (ADDconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt v.reset(OpARM64ADDconst) v.AuxInt = c v.AddArg(x) @@ -897,7 +897,7 @@ func rewriteValueARM64_OpARM64ADDconst(v *Value) bool { v.AddArg(ptr) return true } - // match: (ADDconst [0] x) + // match: (ADDconst [0] x) // cond: // result: x for { @@ -1179,31 +1179,31 @@ func rewriteValueARM64_OpARM64ADDshiftRL(v *Value) bool { return false } func rewriteValueARM64_OpARM64AND(v *Value) bool { - // match: (AND x (MOVDconst [c])) + // match: (AND (MOVDconst [c]) x) // cond: // result: (ANDconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := 
v.Args[1] v.reset(OpARM64ANDconst) v.AuxInt = c v.AddArg(x) return true } - // match: (AND (MOVDconst [c]) x) + // match: (AND x (MOVDconst [c])) // cond: // result: (ANDconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt v.reset(OpARM64ANDconst) v.AuxInt = c v.AddArg(x) @@ -1237,21 +1237,6 @@ func rewriteValueARM64_OpARM64AND(v *Value) bool { v.AddArg(y) return true } - // match: (AND (MVN y) x) - // cond: - // result: (BIC x y) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MVN { - break - } - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpARM64BIC) - v.AddArg(x) - v.AddArg(y) - return true - } // match: (AND x (SLLconst [c] y)) // cond: // result: (ANDshiftLL x y [c]) @@ -1357,7 +1342,7 @@ func rewriteValueARM64_OpARM64AND(v *Value) bool { return false } func rewriteValueARM64_OpARM64ANDconst(v *Value) bool { - // match: (ANDconst [0] _) + // match: (ANDconst [0] _) // cond: // result: (MOVDconst [0]) for { @@ -1681,7 +1666,7 @@ func rewriteValueARM64_OpARM64BIC(v *Value) bool { return false } func rewriteValueARM64_OpARM64BICconst(v *Value) bool { - // match: (BICconst [0] x) + // match: (BICconst [0] x) // cond: // result: x for { @@ -2137,7 +2122,7 @@ func rewriteValueARM64_OpARM64CMPWconst(v *Value) bool { return false } func rewriteValueARM64_OpARM64CMPconst(v *Value) bool { - // match: (CMPconst (MOVDconst [x]) [y]) + // match: (CMPconst (MOVDconst [x]) [y]) // cond: x==y // result: (FlagEQ) for { @@ -2153,7 +2138,7 @@ func rewriteValueARM64_OpARM64CMPconst(v *Value) bool { v.reset(OpARM64FlagEQ) return true } - // match: (CMPconst (MOVDconst [x]) [y]) + // match: (CMPconst (MOVDconst [x]) [y]) // cond: int64(x)uint64(y) // result: (FlagLT_UGT) for { @@ -2185,7 +2170,7 @@ func rewriteValueARM64_OpARM64CMPconst(v *Value) bool { v.reset(OpARM64FlagLT_UGT) return true } - // match: (CMPconst (MOVDconst [x]) [y]) + // match: (CMPconst (MOVDconst [x]) [y]) // cond: int64(x)>int64(y) && uint64(x)int64(y) && uint64(x)>uint64(y) // result: (FlagGT_UGT) for { @@ -2581,7 +2566,7 @@ func rewriteValueARM64_OpARM64CSELULT0(v *Value) bool { return false } func rewriteValueARM64_OpARM64DIV(v *Value) bool { - // match: (DIV (MOVDconst [c]) (MOVDconst [d])) + // match: (DIV (MOVDconst [c]) (MOVDconst [d])) // cond: // result: (MOVDconst [int64(c)/int64(d)]) for { @@ -2602,7 +2587,7 @@ func rewriteValueARM64_OpARM64DIV(v *Value) bool { return false } func rewriteValueARM64_OpARM64DIVW(v *Value) bool { - // match: (DIVW (MOVDconst [c]) (MOVDconst [d])) + // match: (DIVW (MOVDconst [c]) (MOVDconst [d])) // cond: // result: (MOVDconst [int64(int32(c)/int32(d))]) for { @@ -3515,7 +3500,7 @@ func rewriteValueARM64_OpARM64LessThanU(v *Value) bool { return false } func rewriteValueARM64_OpARM64MOD(v *Value) bool { - // match: (MOD (MOVDconst [c]) (MOVDconst [d])) + // match: (MOD (MOVDconst [c]) (MOVDconst [d])) // cond: // result: (MOVDconst [int64(c)%int64(d)]) for { @@ -3536,7 +3521,7 @@ func rewriteValueARM64_OpARM64MOD(v *Value) bool { return false } func rewriteValueARM64_OpARM64MODW(v *Value) bool { - // match: (MODW (MOVDconst [c]) (MOVDconst [d])) + // match: (MODW (MOVDconst [c]) (MOVDconst [d])) // cond: // result: (MOVDconst [int64(int32(c)%int32(d))]) for { @@ -3763,7 +3748,7 @@ func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool { v.AddArg(x) return true } - // match: (MOVBreg (MOVDconst [c])) + // match: (MOVBreg (MOVDconst [c])) // cond: 
// result: (MOVDconst [int64(int8(c))]) for { @@ -4114,7 +4099,7 @@ func rewriteValueARM64_OpARM64MOVDreg(v *Value) bool { v.AddArg(x) return true } - // match: (MOVDreg (MOVDconst [c])) + // match: (MOVDreg (MOVDconst [c])) // cond: // result: (MOVDconst [c]) for { @@ -4534,7 +4519,7 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool { v.AddArg(x) return true } - // match: (MOVHreg (MOVDconst [c])) + // match: (MOVHreg (MOVDconst [c])) // cond: // result: (MOVDconst [int64(int16(c))]) for { @@ -5110,7 +5095,7 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { v.AddArg(x) return true } - // match: (MOVWreg (MOVDconst [c])) + // match: (MOVWreg (MOVDconst [c])) // cond: // result: (MOVDconst [int64(int32(c))]) for { @@ -5312,22 +5297,6 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v.AddArg(x) return true } - // match: (MUL (MOVDconst [-1]) x) - // cond: - // result: (NEG x) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - if v_0.AuxInt != -1 { - break - } - x := v.Args[1] - v.reset(OpARM64NEG) - v.AddArg(x) - return true - } // match: (MUL _ (MOVDconst [0])) // cond: // result: (MOVDconst [0]) @@ -5343,21 +5312,6 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v.AuxInt = 0 return true } - // match: (MUL (MOVDconst [0]) _) - // cond: - // result: (MOVDconst [0]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - if v_0.AuxInt != 0 { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } // match: (MUL x (MOVDconst [1])) // cond: // result: x @@ -5375,23 +5329,6 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v.AddArg(x) return true } - // match: (MUL (MOVDconst [1]) x) - // cond: - // result: x - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - if v_0.AuxInt != 1 { - break - } - x := v.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (MUL x (MOVDconst [c])) // cond: isPowerOfTwo(c) // result: (SLLconst [log2(c)] x) @@ -5410,24 +5347,6 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v.AddArg(x) return true } - // match: (MUL (MOVDconst [c]) x) - // cond: isPowerOfTwo(c) - // result: (SLLconst [log2(c)] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true - } // match: (MUL x (MOVDconst [c])) // cond: isPowerOfTwo(c-1) && c >= 3 // result: (ADDshiftLL x x [log2(c-1)]) @@ -5447,25 +5366,6 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v.AddArg(x) return true } - // match: (MUL (MOVDconst [c]) x) - // cond: isPowerOfTwo(c-1) && c >= 3 - // result: (ADDshiftLL x x [log2(c-1)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(isPowerOfTwo(c-1) && c >= 3) { - break - } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c - 1) - v.AddArg(x) - v.AddArg(x) - return true - } // match: (MUL x (MOVDconst [c])) // cond: isPowerOfTwo(c+1) && c >= 7 // result: (ADDshiftLL (NEG x) x [log2(c+1)]) @@ -5487,27 +5387,6 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v.AddArg(x) return true } - // match: (MUL (MOVDconst [c]) x) - // cond: isPowerOfTwo(c+1) && c >= 7 - // result: (ADDshiftLL (NEG x) x [log2(c+1)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(isPowerOfTwo(c+1) && c >= 7) { - break - } - v.reset(OpARM64ADDshiftLL) - v.AuxInt 
= log2(c + 1) - v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } // match: (MUL x (MOVDconst [c])) // cond: c%3 == 0 && isPowerOfTwo(c/3) // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) @@ -5530,28 +5409,6 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v.AddArg(v0) return true } - // match: (MUL (MOVDconst [c]) x) - // cond: c%3 == 0 && isPowerOfTwo(c/3) - // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(c%3 == 0 && isPowerOfTwo(c/3)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (MUL x (MOVDconst [c])) // cond: c%5 == 0 && isPowerOfTwo(c/5) // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) @@ -5574,28 +5431,6 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v.AddArg(v0) return true } - // match: (MUL (MOVDconst [c]) x) - // cond: c%5 == 0 && isPowerOfTwo(c/5) - // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(c%5 == 0 && isPowerOfTwo(c/5)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 5) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (MUL x (MOVDconst [c])) // cond: c%7 == 0 && isPowerOfTwo(c/7) // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) @@ -5620,30 +5455,6 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v.AddArg(v0) return true } - // match: (MUL (MOVDconst [c]) x) - // cond: c%7 == 0 && isPowerOfTwo(c/7) - // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(c%7 == 0 && isPowerOfTwo(c/7)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (MUL x (MOVDconst [c])) // cond: c%9 == 0 && isPowerOfTwo(c/9) // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) @@ -5666,156 +5477,57 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { v.AddArg(v0) return true } - // match: (MUL (MOVDconst [c]) x) - // cond: c%9 == 0 && isPowerOfTwo(c/9) - // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) + // match: (MUL (MOVDconst [-1]) x) + // cond: + // result: (NEG x) for { v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - x := v.Args[1] - if !(c%9 == 0 && isPowerOfTwo(c/9)) { + if v_0.AuxInt != -1 { break } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 9) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) + x := v.Args[1] + v.reset(OpARM64NEG) + v.AddArg(x) return true } - // match: (MUL (MOVDconst [c]) (MOVDconst [d])) + // match: (MUL (MOVDconst [0]) _) // cond: - // result: (MOVDconst [c*d]) + // result: (MOVDconst [0]) for { v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + if v_0.AuxInt != 0 { break } - d := v_1.AuxInt v.reset(OpARM64MOVDconst) - v.AuxInt = c * d + v.AuxInt 
= 0 return true } - // match: (MUL (MOVDconst [d]) (MOVDconst [c])) + // match: (MUL (MOVDconst [1]) x) // cond: - // result: (MOVDconst [c*d]) + // result: x for { v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { break } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = c * d - return true - } - return false -} -func rewriteValueARM64_OpARM64MULW(v *Value) bool { - b := v.Block - _ = b - // match: (MULW x (MOVDconst [c])) - // cond: int32(c)==-1 - // result: (NEG x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(int32(c) == -1) { - break - } - v.reset(OpARM64NEG) - v.AddArg(x) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: int32(c)==-1 - // result: (NEG x) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if v_0.AuxInt != 1 { break } - c := v_0.AuxInt x := v.Args[1] - if !(int32(c) == -1) { - break - } - v.reset(OpARM64NEG) - v.AddArg(x) - return true - } - // match: (MULW _ (MOVDconst [c])) - // cond: int32(c)==0 - // result: (MOVDconst [0]) - for { - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(int32(c) == 0) { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - // match: (MULW (MOVDconst [c]) _) - // cond: int32(c)==0 - // result: (MOVDconst [0]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - if !(int32(c) == 0) { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - // match: (MULW x (MOVDconst [c])) - // cond: int32(c)==1 - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(int32(c) == 1) { - break - } v.reset(OpCopy) v.Type = x.Type v.AddArg(x) return true } - // match: (MULW (MOVDconst [c]) x) - // cond: int32(c)==1 - // result: x + // match: (MUL (MOVDconst [c]) x) + // cond: isPowerOfTwo(c) + // result: (SLLconst [log2(c)] x) for { v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { @@ -5823,24 +5535,6 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { } c := v_0.AuxInt x := v.Args[1] - if !(int32(c) == 1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MULW x (MOVDconst [c])) - // cond: isPowerOfTwo(c) - // result: (SLLconst [log2(c)] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt if !(isPowerOfTwo(c)) { break } @@ -5849,7 +5543,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { v.AddArg(x) return true } - // match: (MULW (MOVDconst [c]) x) + // match: (MUL (MOVDconst [c]) x) // cond: isPowerOfTwo(c) // result: (SLLconst [log2(c)] x) for { @@ -5867,27 +5561,8 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { v.AddArg(x) return true } - // match: (MULW x (MOVDconst [c])) - // cond: isPowerOfTwo(c-1) && int32(c) >= 3 - // result: (ADDshiftLL x x [log2(c-1)]) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c-1) && int32(c) >= 3) { - break - } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c - 1) - v.AddArg(x) - v.AddArg(x) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: isPowerOfTwo(c-1) && int32(c) >= 3 + // match: (MUL (MOVDconst [c]) x) + // cond: isPowerOfTwo(c-1) && c >= 3 // result: (ADDshiftLL x x [log2(c-1)]) for { v_0 := v.Args[0] @@ -5896,7 +5571,7 @@ func 
rewriteValueARM64_OpARM64MULW(v *Value) bool { } c := v_0.AuxInt x := v.Args[1] - if !(isPowerOfTwo(c-1) && int32(c) >= 3) { + if !(isPowerOfTwo(c-1) && c >= 3) { break } v.reset(OpARM64ADDshiftLL) @@ -5905,29 +5580,8 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { v.AddArg(x) return true } - // match: (MULW x (MOVDconst [c])) - // cond: isPowerOfTwo(c+1) && int32(c) >= 7 - // result: (ADDshiftLL (NEG x) x [log2(c+1)]) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(c+1) && int32(c) >= 7) { - break - } - v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c + 1) - v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: isPowerOfTwo(c+1) && int32(c) >= 7 + // match: (MUL (MOVDconst [c]) x) + // cond: isPowerOfTwo(c+1) && c >= 7 // result: (ADDshiftLL (NEG x) x [log2(c+1)]) for { v_0 := v.Args[0] @@ -5936,7 +5590,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { } c := v_0.AuxInt x := v.Args[1] - if !(isPowerOfTwo(c+1) && int32(c) >= 7) { + if !(isPowerOfTwo(c+1) && c >= 7) { break } v.reset(OpARM64ADDshiftLL) @@ -5947,30 +5601,8 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { v.AddArg(x) return true } - // match: (MULW x (MOVDconst [c])) - // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) - // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 3) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 1 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) + // match: (MUL (MOVDconst [c]) x) + // cond: c%3 == 0 && isPowerOfTwo(c/3) // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) for { v_0 := v.Args[0] @@ -5979,7 +5611,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { } c := v_0.AuxInt x := v.Args[1] - if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { + if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } v.reset(OpARM64SLLconst) @@ -5991,30 +5623,8 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { v.AddArg(v0) return true } - // match: (MULW x (MOVDconst [c])) - // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) - // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 5) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) + // match: (MUL (MOVDconst [c]) x) + // cond: c%5 == 0 && isPowerOfTwo(c/5) // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) for { v_0 := v.Args[0] @@ -6023,7 +5633,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { } c := v_0.AuxInt x := v.Args[1] - if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { + if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } v.reset(OpARM64SLLconst) @@ -6035,17 +5645,17 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { v.AddArg(v0) return true } - // match: (MULW x (MOVDconst [c])) - // cond: c%7 == 0 && 
isPowerOfTwo(c/7) && is32Bit(c) + // match: (MUL (MOVDconst [c]) x) + // cond: c%7 == 0 && isPowerOfTwo(c/7) // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt - if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { + c := v_0.AuxInt + x := v.Args[1] + if !(c%7 == 0 && isPowerOfTwo(c/7)) { break } v.reset(OpARM64SLLconst) @@ -6059,9 +5669,9 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { v.AddArg(v0) return true } - // match: (MULW (MOVDconst [c]) x) - // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) - // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) + // match: (MUL (MOVDconst [c]) x) + // cond: c%9 == 0 && isPowerOfTwo(c/9) + // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) for { v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { @@ -6069,57 +5679,11 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { } c := v_0.AuxInt x := v.Args[1] - if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { + if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 7) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) - v1.AddArg(x) - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULW x (MOVDconst [c])) - // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) - // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 9) - v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 - v0.AddArg(x) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (MULW (MOVDconst [c]) x) - // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) - // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 9) + v.AuxInt = log2(c / 9) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) v0.AuxInt = 3 v0.AddArg(x) @@ -6127,9 +5691,9 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { v.AddArg(v0) return true } - // match: (MULW (MOVDconst [c]) (MOVDconst [d])) + // match: (MUL (MOVDconst [c]) (MOVDconst [d])) // cond: - // result: (MOVDconst [int64(int32(c)*int32(d))]) + // result: (MOVDconst [c*d]) for { v_0 := v.Args[0] if v_0.Op != OpARM64MOVDconst { @@ -6142,143 +5706,86 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { } d := v_1.AuxInt v.reset(OpARM64MOVDconst) - v.AuxInt = int64(int32(c) * int32(d)) + v.AuxInt = c * d return true } - // match: (MULW (MOVDconst [d]) (MOVDconst [c])) - // cond: - // result: (MOVDconst [int64(int32(c)*int32(d))]) + return false +} +func rewriteValueARM64_OpARM64MULW(v *Value) bool { + b := v.Block + _ = b + // match: (MULW x (MOVDconst [c])) + // cond: int32(c)==-1 + // result: (NEG x) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - d := v_0.AuxInt + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = int64(int32(c) * int32(d)) - return true - } - return false -} -func rewriteValueARM64_OpARM64MVN(v *Value) bool { - // 
match: (MVN (MOVDconst [c])) - // cond: - // result: (MOVDconst [^c]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = ^c - return true - } - return false -} -func rewriteValueARM64_OpARM64NEG(v *Value) bool { - // match: (NEG (MOVDconst [c])) - // cond: - // result: (MOVDconst [-c]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if !(int32(c) == -1) { break } - c := v_0.AuxInt - v.reset(OpARM64MOVDconst) - v.AuxInt = -c + v.reset(OpARM64NEG) + v.AddArg(x) return true } - return false -} -func rewriteValueARM64_OpARM64NotEqual(v *Value) bool { - // match: (NotEqual (FlagEQ)) - // cond: + // match: (MULW _ (MOVDconst [c])) + // cond: int32(c)==0 // result: (MOVDconst [0]) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagEQ { + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } - // match: (NotEqual (FlagLT_ULT)) - // cond: - // result: (MOVDconst [1]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagLT_ULT { + c := v_1.AuxInt + if !(int32(c) == 0) { break } v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + v.AuxInt = 0 return true } - // match: (NotEqual (FlagLT_UGT)) - // cond: - // result: (MOVDconst [1]) + // match: (MULW x (MOVDconst [c])) + // cond: int32(c)==1 + // result: x for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagLT_UGT { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 - return true - } - // match: (NotEqual (FlagGT_ULT)) - // cond: - // result: (MOVDconst [1]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagGT_ULT { + c := v_1.AuxInt + if !(int32(c) == 1) { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } - // match: (NotEqual (FlagGT_UGT)) - // cond: - // result: (MOVDconst [1]) + // match: (MULW x (MOVDconst [c])) + // cond: isPowerOfTwo(c) + // result: (SLLconst [log2(c)] x) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64FlagGT_UGT { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - v.reset(OpARM64MOVDconst) - v.AuxInt = 1 - return true - } - // match: (NotEqual (InvertFlags x)) - // cond: - // result: (NotEqual x) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64InvertFlags { + c := v_1.AuxInt + if !(isPowerOfTwo(c)) { break } - x := v_0.Args[0] - v.reset(OpARM64NotEqual) + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c) v.AddArg(x) return true } - return false -} -func rewriteValueARM64_OpARM64OR(v *Value) bool { - b := v.Block - _ = b - // match: (OR x (MOVDconst [c])) - // cond: - // result: (ORconst [c] x) + // match: (MULW x (MOVDconst [c])) + // cond: isPowerOfTwo(c-1) && int32(c) >= 3 + // result: (ADDshiftLL x x [log2(c-1)]) for { x := v.Args[0] v_1 := v.Args[1] @@ -6286,900 +5793,647 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { break } c := v_1.AuxInt - v.reset(OpARM64ORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (OR (MOVDconst [c]) x) - // cond: - // result: (ORconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpARM64ORconst) - v.AuxInt = c + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c - 1) v.AddArg(x) - return true - } - // match: (OR x x) - // cond: - // result: x - for { - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = x.Type v.AddArg(x) 
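// Illustrative sketch (editor's addition, not part of the generated file): the
// constant-folding rules in this region fold MUL of two constants as a full
// 64-bit product (c*d), but fold MULW as a 32-bit product re-widened to int64,
// which is what int64(int32(c)*int32(d)) computes. foldMUL and foldMULW below
// are assumed names used only for this check, not compiler helpers.
package main

import "fmt"

func foldMUL(c, d int64) int64  { return c * d }                      // MUL: 64-bit fold
func foldMULW(c, d int64) int64 { return int64(int32(c) * int32(d)) } // MULW: 32-bit fold

func main() {
	c, d := int64(0x100000001), int64(3)
	fmt.Println(foldMUL(c, d))  // 12884901891: the full 64-bit product
	fmt.Println(foldMULW(c, d)) // 3: only the low 32 bits of c take part
}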
return true } - // match: (OR x (SLLconst [c] y)) - // cond: - // result: (ORshiftLL x y [c]) + // match: (MULW x (MOVDconst [c])) + // cond: isPowerOfTwo(c+1) && int32(c) >= 7 + // result: (ADDshiftLL (NEG x) x [log2(c+1)]) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64SLLconst { + if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARM64ORshiftLL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (OR (SLLconst [c] y) x) - // cond: - // result: (ORshiftLL x y [c]) - for { - v_0 := v.Args[0] - if v_0.Op != OpARM64SLLconst { + if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } - c := v_0.AuxInt - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpARM64ORshiftLL) - v.AuxInt = c + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c + 1) + v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v0.AddArg(x) + v.AddArg(v0) v.AddArg(x) - v.AddArg(y) return true } - // match: (OR x (SRLconst [c] y)) - // cond: - // result: (ORshiftRL x y [c]) + // match: (MULW x (MOVDconst [c])) + // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) + // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64SRLconst { + if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARM64ORshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { + break + } + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 3) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 1 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (OR (SRLconst [c] y) x) - // cond: - // result: (ORshiftRL x y [c]) + // match: (MULW x (MOVDconst [c])) + // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) + // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64SRLconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpARM64ORshiftRL) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + c := v_1.AuxInt + if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { + break + } + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 5) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (OR x (SRAconst [c] y)) - // cond: - // result: (ORshiftRA x y [c]) + // match: (MULW x (MOVDconst [c])) + // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) + // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpARM64SRAconst { + if v_1.Op != OpARM64MOVDconst { break } c := v_1.AuxInt - y := v_1.Args[0] - v.reset(OpARM64ORshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { + break + } + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 7) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (OR (SRAconst [c] y) x) - // cond: - // result: (ORshiftRA x y [c]) + // match: (MULW x (MOVDconst [c])) + // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) + // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64SRAconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - y := v_0.Args[0] - x := 
v.Args[1] - v.reset(OpARM64ORshiftRA) - v.AuxInt = c - v.AddArg(x) - v.AddArg(y) + c := v_1.AuxInt + if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { + break + } + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 9) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) return true } - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUload {s} (OffPtr [i0] p) mem) + // match: (MULW (MOVDconst [c]) x) + // cond: int32(c)==-1 + // result: (NEG x) for { - t := v.Type - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if o0.AuxInt != 8 { + c := v_0.AuxInt + x := v.Args[1] + if !(int32(c) == -1) { break } - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { + v.reset(OpARM64NEG) + v.AddArg(x) + return true + } + // match: (MULW (MOVDconst [c]) _) + // cond: int32(c)==0 + // result: (MOVDconst [0]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if o1.AuxInt != 16 { + c := v_0.AuxInt + if !(int32(c) == 0) { break } - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst { + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true + } + // match: (MULW (MOVDconst [c]) x) + // cond: int32(c)==1 + // result: x + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if s0.AuxInt != 24 { + c := v_0.AuxInt + x := v.Args[1] + if !(int32(c) == 1) { break } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MULW (MOVDconst [c]) x) + // cond: isPowerOfTwo(c) + // result: (SLLconst [log2(c)] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { + c := v_0.AuxInt + x := v.Args[1] + if !(isPowerOfTwo(c)) { break } - i3 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c) + v.AddArg(x) + return true + } + // match: (MULW (MOVDconst [c]) x) + // cond: isPowerOfTwo(c-1) && int32(c) >= 3 + // result: (ADDshiftLL x x [log2(c-1)]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { + c := v_0.AuxInt + x := v.Args[1] + if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } - i2 := x1.AuxInt - if x1.Aux != s { + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c - 1) + v.AddArg(x) + v.AddArg(x) + return true + } + // match: (MULW (MOVDconst [c]) x) + // cond: isPowerOfTwo(c+1) && int32(c) >= 7 + // result: (ADDshiftLL (NEG x) x [log2(c+1)]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if p != x1.Args[0] { + c := v_0.AuxInt + x := v.Args[1] + if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } - if mem != x1.Args[1] { + v.reset(OpARM64ADDshiftLL) + v.AuxInt = log2(c + 1) + 
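// Illustrative sketch (editor's addition, not part of the generated file): the
// MULW-by-constant rules around this hunk are standard shift/add strength
// reductions, and each identity holds for every 32-bit input because both sides
// wrap identically. checkMULWIdentities is an assumed name used only here.
package main

import "fmt"

func checkMULWIdentities(x int32) {
	fmt.Println(x*8 == x<<3)         // c == 2^k   -> (SLLconst [k] x)
	fmt.Println(x*9 == x<<3+x)       // c-1 == 2^k -> (ADDshiftLL x x [k])
	fmt.Println(x*7 == x<<3+(-x))    // c+1 == 2^k -> (ADDshiftLL (NEG x) x [k])
	fmt.Println(x*12 == (x+x<<1)<<2) // c == 3*2^k -> (SLLconst [k] (ADDshiftLL x x [1]))
	fmt.Println(x*20 == (x+x<<2)<<2) // c == 5*2^k -> (SLLconst [k] (ADDshiftLL x x [2]))
	fmt.Println(x*28 == (x<<3-x)<<2) // c == 7*2^k -> (SLLconst [k] (ADDshiftLL (NEG x) x [3]))
	fmt.Println(x*36 == (x+x<<3)<<2) // c == 9*2^k -> (SLLconst [k] (ADDshiftLL x x [3]))
}

func main() { checkMULWIdentities(123456789) }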
v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULW (MOVDconst [c]) x) + // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) + // result: (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { + c := v_0.AuxInt + x := v.Args[1] + if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 3) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 1 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULW (MOVDconst [c]) x) + // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) + // result: (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - i1 := x2.AuxInt - if x2.Aux != s { + c := v_0.AuxInt + x := v.Args[1] + if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { break } - if p != x2.Args[0] { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 5) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 2 + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULW (MOVDconst [c]) x) + // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) + // result: (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if mem != x2.Args[1] { + c := v_0.AuxInt + x := v.Args[1] + if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { break } - y3 := v.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { - break - } - i0 := x3.AuxInt - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if mem != x3.Args[1] { + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 7) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (MULW (MOVDconst [c]) x) + // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) + // result: (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { + c := v_0.AuxInt + x := v.Args[1] + if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { break } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, t) - v.reset(OpCopy) + v.reset(OpARM64SLLconst) + v.AuxInt = log2(c / 9) + v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) + v0.AuxInt = 3 + v0.AddArg(x) + v0.AddArg(x) v.AddArg(v0) - v0.Aux = s - v1 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v1.AuxInt = i0 - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) return true } - // match: (OR y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem)))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses 
== 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUload {s} (OffPtr [i0] p) mem) + // match: (MULW (MOVDconst [c]) (MOVDconst [d])) + // cond: + // result: (MOVDconst [int64(int32(c)*int32(d))]) for { - t := v.Type - y3 := v.Args[0] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - i0 := x3.AuxInt - s := x3.Aux - p := x3.Args[0] - mem := x3.Args[1] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL { + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - if o0.AuxInt != 8 { + d := v_1.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = int64(int32(c) * int32(d)) + return true + } + return false +} +func rewriteValueARM64_OpARM64MVN(v *Value) bool { + // match: (MVN (MOVDconst [c])) + // cond: + // result: (MOVDconst [^c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { + c := v_0.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = ^c + return true + } + return false +} +func rewriteValueARM64_OpARM64NEG(v *Value) bool { + // match: (NEG (MOVDconst [c])) + // cond: + // result: (MOVDconst [-c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if o1.AuxInt != 16 { + c := v_0.AuxInt + v.reset(OpARM64MOVDconst) + v.AuxInt = -c + return true + } + return false +} +func rewriteValueARM64_OpARM64NotEqual(v *Value) bool { + // match: (NotEqual (FlagEQ)) + // cond: + // result: (MOVDconst [0]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagEQ { break } - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst { + v.reset(OpARM64MOVDconst) + v.AuxInt = 0 + return true + } + // match: (NotEqual (FlagLT_ULT)) + // cond: + // result: (MOVDconst [1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagLT_ULT { break } - if s0.AuxInt != 24 { + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 + return true + } + // match: (NotEqual (FlagLT_UGT)) + // cond: + // result: (MOVDconst [1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagLT_UGT { break } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 + return true + } + // match: (NotEqual (FlagGT_ULT)) + // cond: + // result: (MOVDconst [1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagGT_ULT { break } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 + return true + } + // match: (NotEqual (FlagGT_UGT)) + // cond: + // result: (MOVDconst [1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64FlagGT_UGT { break } - i3 := x0.AuxInt - if x0.Aux != s { + v.reset(OpARM64MOVDconst) + v.AuxInt = 1 + return true + } + // match: (NotEqual (InvertFlags x)) + // cond: + // result: (NotEqual x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64InvertFlags { break } - if p != x0.Args[0] { + x := v_0.Args[0] + v.reset(OpARM64NotEqual) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM64_OpARM64OR(v *Value) bool { + b := v.Block + _ = b + // match: (OR (MOVDconst [c]) x) + // cond: + // result: (ORconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - if mem != 
x0.Args[1] { + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARM64ORconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (OR x (MOVDconst [c])) + // cond: + // result: (ORconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { + c := v_1.AuxInt + v.reset(OpARM64ORconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (OR x x) + // cond: + // result: x + for { + x := v.Args[0] + if x != v.Args[1] { break } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (OR x s:(SLLconst [c] y)) + // cond: s.Uses == 1 && clobber(s) + // result: (ORshiftLL x y [c]) + for { + x := v.Args[0] + s := v.Args[1] + if s.Op != OpARM64SLLconst { break } - i2 := x1.AuxInt - if x1.Aux != s { + c := s.AuxInt + y := s.Args[0] + if !(s.Uses == 1 && clobber(s)) { break } - if p != x1.Args[0] { + v.reset(OpARM64ORshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (OR s:(SLLconst [c] y) x) + // cond: s.Uses == 1 && clobber(s) + // result: (ORshiftLL x y [c]) + for { + s := v.Args[0] + if s.Op != OpARM64SLLconst { break } - if mem != x1.Args[1] { + c := s.AuxInt + y := s.Args[0] + x := v.Args[1] + if !(s.Uses == 1 && clobber(s)) { break } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { + v.reset(OpARM64ORshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (OR x (SLLconst [c] y)) + // cond: + // result: (ORshiftLL x y [c]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SLLconst { break } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARM64ORshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (OR (SLLconst [c] y) x) + // cond: + // result: (ORshiftLL x y [c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64SLLconst { break } - i1 := x2.AuxInt - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if mem != x2.Args[1] { - break - } - if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.Aux = s - v1 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v1.AuxInt = i0 - v1.AddArg(p) - v0.AddArg(v1) - v0.AddArg(mem) - return true - } - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && 
y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDload {s} (OffPtr [i0] p) mem)) - for { - t := v.Type - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL { - break - } - if o0.AuxInt != 8 { - break - } - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { - break - } - if o1.AuxInt != 16 { - break - } - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL { - break - } - if o2.AuxInt != 24 { - break - } - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL { - break - } - if o3.AuxInt != 32 { - break - } - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL { - break - } - if o4.AuxInt != 40 { - break - } - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL { - break - } - if o5.AuxInt != 48 { - break - } - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst { - break - } - if s0.AuxInt != 56 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { - break - } - i7 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { - break - } - i6 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { - break - } - i5 := x2.AuxInt - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if mem != x2.Args[1] { - break - } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { - break - } - i4 := x3.AuxInt - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if mem != x3.Args[1] { - break - } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload { - break - } - i3 := x4.AuxInt - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if mem != x4.Args[1] { - break - } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { - break - } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUload { - break - } - i2 := x5.AuxInt - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if mem != x5.Args[1] { - break - } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { - break - } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUload { - break - } - i1 := x6.AuxInt - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if mem != x6.Args[1] { - break - } - y7 := v.Args[1] - if y7.Op != OpARM64MOVDnop { - break - } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUload { - break - } - i0 := x7.AuxInt - if x7.Aux != s { - break - } - if p != x7.Args[0] { - break - } - if mem != x7.Args[1] { - break - } - if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 
1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDload, t) - v1.Aux = s - v2 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v2.AuxInt = i0 - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem)))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDload {s} (OffPtr [i0] p) mem)) - for { - t := v.Type - y7 := v.Args[0] - if y7.Op != OpARM64MOVDnop { - break - } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUload { - break - } - i0 := x7.AuxInt - s := x7.Aux - p := x7.Args[0] - mem := x7.Args[1] - o0 := v.Args[1] - if o0.Op != OpARM64ORshiftLL { - break - } - if o0.AuxInt != 8 { - break - } - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { - break - } - if o1.AuxInt != 16 { - break - } - o2 := o1.Args[0] - if o2.Op != OpARM64ORshiftLL { - break - } - if o2.AuxInt != 24 { - break - } - o3 := o2.Args[0] - if o3.Op != OpARM64ORshiftLL { - break - } - if o3.AuxInt != 32 { - break - } - o4 := o3.Args[0] - if o4.Op != OpARM64ORshiftLL { - break - } - if o4.AuxInt != 40 { - break - } - o5 := o4.Args[0] - if o5.Op != OpARM64ORshiftLL { - break - } - if o5.AuxInt != 48 { - break - } - s0 := o5.Args[0] - if s0.Op != OpARM64SLLconst { - break - } - if s0.AuxInt != 56 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { - break - } - i7 := x0.AuxInt - if x0.Aux != s { - break - } - if p 
!= x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - y1 := o5.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { - break - } - i6 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - y2 := o4.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { - break - } - i5 := x2.AuxInt - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if mem != x2.Args[1] { - break - } - y3 := o3.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { - break - } - i4 := x3.AuxInt - if x3.Aux != s { - break - } - if p != x3.Args[0] { - break - } - if mem != x3.Args[1] { - break - } - y4 := o2.Args[1] - if y4.Op != OpARM64MOVDnop { - break - } - x4 := y4.Args[0] - if x4.Op != OpARM64MOVBUload { - break - } - i3 := x4.AuxInt - if x4.Aux != s { - break - } - if p != x4.Args[0] { - break - } - if mem != x4.Args[1] { - break - } - y5 := o1.Args[1] - if y5.Op != OpARM64MOVDnop { - break - } - x5 := y5.Args[0] - if x5.Op != OpARM64MOVBUload { - break - } - i2 := x5.AuxInt - if x5.Aux != s { - break - } - if p != x5.Args[0] { - break - } - if mem != x5.Args[1] { - break - } - y6 := o0.Args[1] - if y6.Op != OpARM64MOVDnop { - break - } - x6 := y6.Args[0] - if x6.Op != OpARM64MOVBUload { - break - } - i1 := x6.AuxInt - if x6.Aux != s { - break - } - if p != x6.Args[0] { - break - } - if mem != x6.Args[1] { - break - } - if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { - break - } - b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) - v0 := b.NewValue0(v.Pos, OpARM64REV, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDload, t) - v1.Aux = s - v2 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v2.AuxInt = i0 - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARM64ORshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) return true } - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && 
clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUload {s} (OffPtr [i0] p) mem)) + // match: (OR x (SRLconst [c] y)) + // cond: + // result: (ORshiftRL x y [c]) for { - t := v.Type - o0 := v.Args[0] - if o0.Op != OpARM64ORshiftLL { - break - } - if o0.AuxInt != 8 { - break - } - o1 := o0.Args[0] - if o1.Op != OpARM64ORshiftLL { - break - } - if o1.AuxInt != 16 { - break - } - s0 := o1.Args[0] - if s0.Op != OpARM64SLLconst { - break - } - if s0.AuxInt != 24 { - break - } - y0 := s0.Args[0] - if y0.Op != OpARM64MOVDnop { - break - } - x0 := y0.Args[0] - if x0.Op != OpARM64MOVBUload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - y1 := o1.Args[1] - if y1.Op != OpARM64MOVDnop { - break - } - x1 := y1.Args[0] - if x1.Op != OpARM64MOVBUload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - y2 := o0.Args[1] - if y2.Op != OpARM64MOVDnop { - break - } - x2 := y2.Args[0] - if x2.Op != OpARM64MOVBUload { - break - } - i2 := x2.AuxInt - if x2.Aux != s { - break - } - if p != x2.Args[0] { - break - } - if mem != x2.Args[1] { - break - } - y3 := v.Args[1] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { - break - } - i3 := x3.AuxInt - if x3.Aux != s { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRLconst { break } - if p != x3.Args[0] { + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARM64ORshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (OR (SRLconst [c] y) x) + // cond: + // result: (ORshiftRL x y [c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64SRLconst { break } - if mem != x3.Args[1] { + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARM64ORshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (OR x (SRAconst [c] y)) + // cond: + // result: (ORshiftRA x y [c]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64SRAconst { break } - if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARM64ORshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (OR (SRAconst [c] y) x) + // cond: + // result: (ORshiftRA x y [c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARM64SRAconst { break } - b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64REVW, t) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWUload, t) - v1.Aux = s - v2 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v2.AuxInt = i0 - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) - v0.AddArg(v1) + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARM64ORshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) return true } - // match: (OR y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && 
y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUload {s} (OffPtr [i0] p) mem)) + // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i-2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i-3] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) + // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUload {s} (OffPtr [i-3] p) mem) for { t := v.Type - y3 := v.Args[0] - if y3.Op != OpARM64MOVDnop { - break - } - x3 := y3.Args[0] - if x3.Op != OpARM64MOVBUload { - break - } - i3 := x3.AuxInt - s := x3.Aux - p := x3.Args[0] - mem := x3.Args[1] - o0 := v.Args[1] + o0 := v.Args[0] if o0.Op != OpARM64ORshiftLL { break } @@ -7208,16 +6462,10 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x0.Op != OpARM64MOVBUload { break } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] y1 := o1.Args[1] if y1.Op != OpARM64MOVDnop { break @@ -7226,7 +6474,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - i1 := x1.AuxInt + if x1.AuxInt != i-1 { + break + } if x1.Aux != s { break } @@ -7244,7 +6494,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x2.Op != OpARM64MOVBUload { break } - i2 := x2.AuxInt + if x2.AuxInt != i-2 { + break + } if x2.Aux != s { break } @@ -7254,26 +6506,44 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if mem != x2.Args[1] { break } - if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { + y3 := v.Args[1] + if y3.Op != OpARM64MOVDnop { + break + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload { + break + } + if x3.AuxInt != i-3 { + break + } + if x3.Aux != s { + break + } + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { break } b = mergePoint(b, x0, x1, x2, x3) - v0 := b.NewValue0(v.Pos, OpARM64REVW, t) + v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVWUload, t) - v1.Aux = s - v2 := 
b.NewValue0(v.Pos, OpOffPtr, p.Type) - v2.AuxInt = i0 - v2.AddArg(p) - v1.AddArg(v2) - v1.AddArg(mem) + v0.Aux = s + v1 := b.NewValue0(v.Pos, OpOffPtr, p.Type) + v1.AuxInt = i - 3 + v1.AddArg(p) v0.AddArg(v1) + v0.AddArg(mem) return true } - // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDload {s} (OffPtr [i0] p) mem)) + // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i-2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i-3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i-4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i-5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i-6] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i-7] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDload {s} (OffPtr [i-7] p) mem)) for { t := v.Type o0 := v.Args[0] @@ -7333,7 +6603,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x0.Op != OpARM64MOVBUload { break } - i0 := x0.AuxInt + i := x0.AuxInt s := x0.Aux p := x0.Args[0] mem := x0.Args[1] @@ -7345,7 +6615,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - i1 := x1.AuxInt + if x1.AuxInt != i-1 { + break + } if x1.Aux != s { break } @@ -7363,7 +6635,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { 
if x2.Op != OpARM64MOVBUload { break } - i2 := x2.AuxInt + if x2.AuxInt != i-2 { + break + } if x2.Aux != s { break } @@ -7381,7 +6655,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x3.Op != OpARM64MOVBUload { break } - i3 := x3.AuxInt + if x3.AuxInt != i-3 { + break + } if x3.Aux != s { break } @@ -7399,7 +6675,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x4.Op != OpARM64MOVBUload { break } - i4 := x4.AuxInt + if x4.AuxInt != i-4 { + break + } if x4.Aux != s { break } @@ -7417,7 +6695,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x5.Op != OpARM64MOVBUload { break } - i5 := x5.AuxInt + if x5.AuxInt != i-5 { + break + } if x5.Aux != s { break } @@ -7435,7 +6715,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x6.Op != OpARM64MOVBUload { break } - i6 := x6.AuxInt + if x6.AuxInt != i-6 { + break + } if x6.Aux != s { break } @@ -7453,7 +6735,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x7.Op != OpARM64MOVBUload { break } - i7 := x7.AuxInt + if x7.AuxInt != i-7 { + break + } if x7.Aux != s { break } @@ -7463,7 +6747,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if mem != x7.Args[1] { break } - if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { break } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) @@ -7473,31 +6757,134 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { v1 := b.NewValue0(v.Pos, OpARM64MOVDload, t) v1.Aux = s v2 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v2.AuxInt = i0 + v2.AuxInt = i - 7 v2.AddArg(p) v1.AddArg(v2) v1.AddArg(mem) v0.AddArg(v1) return true } - // match: (OR y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem)) o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload 
[i4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem)))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) - // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDload {s} (OffPtr [i0] p) mem)) + // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i+1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i+2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i+3] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0) + // result: @mergePoint(b,x0,x1,x2,x3) (REVW (MOVWUload {s} (OffPtr [i] p) mem)) for { t := v.Type - y7 := v.Args[0] - if y7.Op != OpARM64MOVDnop { + o0 := v.Args[0] + if o0.Op != OpARM64ORshiftLL { break } - x7 := y7.Args[0] - if x7.Op != OpARM64MOVBUload { + if o0.AuxInt != 8 { + break + } + o1 := o0.Args[0] + if o1.Op != OpARM64ORshiftLL { + break + } + if o1.AuxInt != 16 { + break + } + s0 := o1.Args[0] + if s0.Op != OpARM64SLLconst { + break + } + if s0.AuxInt != 24 { + break + } + y0 := s0.Args[0] + if y0.Op != OpARM64MOVDnop { + break + } + x0 := y0.Args[0] + if x0.Op != OpARM64MOVBUload { + break + } + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + y1 := o1.Args[1] + if y1.Op != OpARM64MOVDnop { + break + } + x1 := y1.Args[0] + if x1.Op != OpARM64MOVBUload { + break + } + if x1.AuxInt != i+1 { + break + } + if x1.Aux != s { + break + } + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + y2 := o0.Args[1] + if y2.Op != OpARM64MOVDnop { + break + } + x2 := y2.Args[0] + if x2.Op != OpARM64MOVBUload { + break + } + if x2.AuxInt != i+2 { + break + } + if x2.Aux != s { + break + } + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { break } - i7 := x7.AuxInt - s := x7.Aux - p := x7.Args[0] - mem := x7.Args[1] - o0 := v.Args[1] + y3 := v.Args[1] + if y3.Op != OpARM64MOVDnop { + break + } + x3 := y3.Args[0] + if x3.Op != OpARM64MOVBUload { + break + } + if x3.AuxInt != i+3 { + break + } + if x3.Aux != s { + break + } + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && 
clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(o0) && clobber(o1) && clobber(s0)) { + break + } + b = mergePoint(b, x0, x1, x2, x3) + v0 := b.NewValue0(v.Pos, OpARM64REVW, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVWUload, t) + v1.Aux = s + v2 := b.NewValue0(v.Pos, OpOffPtr, p.Type) + v2.AuxInt = i + v2.AddArg(p) + v1.AddArg(v2) + v1.AddArg(mem) + v0.AddArg(v1) + return true + } + // match: (OR o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i+1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i+2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i+3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i+4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i+5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i+6] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i+7] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV (MOVDload {s} (OffPtr [i] p) mem)) + for { + t := v.Type + o0 := v.Args[0] if o0.Op != OpARM64ORshiftLL { break } @@ -7554,16 +6941,10 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x0.Op != OpARM64MOVBUload { break } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] y1 := o5.Args[1] if y1.Op != OpARM64MOVDnop { break @@ -7572,7 +6953,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - i1 := x1.AuxInt + if x1.AuxInt != i+1 { + break + } if x1.Aux != s { break } @@ -7590,7 +6973,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x2.Op != OpARM64MOVBUload { break } - i2 := x2.AuxInt + if x2.AuxInt != i+2 { + break + } if x2.Aux != s { break } @@ -7608,7 +6993,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x3.Op != OpARM64MOVBUload { break } - i3 := x3.AuxInt + if x3.AuxInt != i+3 { + break + } if x3.Aux != s { break } @@ -7626,7 +7013,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x4.Op != OpARM64MOVBUload { break } - i4 := x4.AuxInt + if x4.AuxInt != i+4 { + break + } if x4.Aux != s { break } @@ -7644,7 +7033,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x5.Op != OpARM64MOVBUload { break } - i5 := x5.AuxInt + if x5.AuxInt != i+5 { + break + } if x5.Aux != s { break } @@ -7662,7 +7053,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if x6.Op != OpARM64MOVBUload { break } - i6 := x6.AuxInt + if x6.AuxInt != i+6 { + break + } if x6.Aux != s { break } @@ -7672,7 +7065,27 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if mem != x6.Args[1] { break } - if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && 
i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { + y7 := v.Args[1] + if y7.Op != OpARM64MOVDnop { + break + } + x7 := y7.Args[0] + if x7.Op != OpARM64MOVBUload { + break + } + if x7.AuxInt != i+7 { + break + } + if x7.Aux != s { + break + } + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(y5) && clobber(y6) && clobber(y7) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) && clobber(s0)) { break } b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) @@ -7682,7 +7095,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { v1 := b.NewValue0(v.Pos, OpARM64MOVDload, t) v1.Aux = s v2 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v2.AuxInt = i0 + v2.AuxInt = i v2.AddArg(p) v1.AddArg(v2) v1.AddArg(mem) @@ -7692,7 +7105,7 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { return false } func rewriteValueARM64_OpARM64ORconst(v *Value) bool { - // match: (ORconst [0] x) + // match: (ORconst [0] x) // cond: // result: x for { @@ -7705,7 +7118,7 @@ func rewriteValueARM64_OpARM64ORconst(v *Value) bool { v.AddArg(x) return true } - // match: (ORconst [-1] _) + // match: (ORconst [-1] _) // cond: // result: (MOVDconst [-1]) for { @@ -7716,7 +7129,7 @@ func rewriteValueARM64_OpARM64ORconst(v *Value) bool { v.AuxInt = -1 return true } - // match: (ORconst [c] (MOVDconst [d])) + // match: (ORconst [c] (MOVDconst [d])) // cond: // result: (MOVDconst [c|d]) for { @@ -7730,7 +7143,7 @@ func rewriteValueARM64_OpARM64ORconst(v *Value) bool { v.AuxInt = c | d return true } - // match: (ORconst [c] (ORconst [d] x)) + // match: (ORconst [c] (ORconst [d] x)) // cond: // result: (ORconst [c|d] x) for { @@ -7751,7 +7164,7 @@ func rewriteValueARM64_OpARM64ORconst(v *Value) bool { func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { b := v.Block _ = b - // match: (ORshiftLL (MOVDconst [c]) x [d]) + // match: (ORshiftLL (MOVDconst [c]) x [d]) // cond: // result: (ORconst [c] (SLLconst x [d])) for { @@ -7770,7 +7183,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v.AddArg(v0) return true } - // match: (ORshiftLL x (MOVDconst [c]) [d]) + // match: (ORshiftLL x (MOVDconst [c]) [d]) // cond: // 
result: (ORconst x [int64(uint64(c)< [c] (SRLconst (MOVWUreg x) [32-c]) x) + // match: ( ORshiftLL [c] (SRLconst (MOVWUreg x) [32-c]) x) // cond: c < 32 && t.Size() == 4 // result: (RORWconst [32-c] x) for { @@ -7858,9 +7271,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v.AddArg(x) return true } - // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) - // result: @mergePoint(b,x0,x1) (MOVHUload {s} (OffPtr [i0] p) mem) + // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUload [i] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i+1] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) + // result: @mergePoint(b,x0,x1) (MOVHUload {s} (OffPtr [i] p) mem) for { t := v.Type if v.AuxInt != 8 { @@ -7874,7 +7287,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x0.Op != OpARM64MOVBUload { break } - i0 := x0.AuxInt + i := x0.AuxInt s := x0.Aux p := x0.Args[0] mem := x0.Args[1] @@ -7886,7 +7299,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - i1 := x1.AuxInt + if x1.AuxInt != i+1 { + break + } if x1.Aux != s { break } @@ -7896,7 +7311,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if mem != x1.Args[1] { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { + if !(x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { break } b = mergePoint(b, x0, x1) @@ -7905,15 +7320,15 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v.AddArg(v0) v0.Aux = s v1 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v1.AuxInt = i0 + v1.AuxInt = i v1.AddArg(p) v0.AddArg(v1) v0.AddArg(mem) return true } - // match: (ORshiftLL [24] o0:(ORshiftLL [16] x0:(MOVHUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i3] {s} p mem))) - // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (MOVWUload {s} (OffPtr [i0] p) mem) + // match: (ORshiftLL [24] o0:(ORshiftLL [16] x0:(MOVHUload [i] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i+2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i+3] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (MOVWUload {s} (OffPtr [i] p) mem) for { t := v.Type if v.AuxInt != 24 { @@ -7930,7 +7345,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x0.Op != OpARM64MOVHUload { break } - i0 := x0.AuxInt + i := x0.AuxInt s := x0.Aux p := x0.Args[0] mem := x0.Args[1] @@ -7942,7 +7357,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - i2 := x1.AuxInt + if x1.AuxInt != i+2 { + break + } if x1.Aux != s 
{ break } @@ -7960,7 +7377,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x2.Op != OpARM64MOVBUload { break } - i3 := x2.AuxInt + if x2.AuxInt != i+3 { + break + } if x2.Aux != s { break } @@ -7970,7 +7389,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if mem != x2.Args[1] { break } - if !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0)) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y1) && clobber(y2) && clobber(o0)) { break } b = mergePoint(b, x0, x1, x2) @@ -7979,15 +7398,15 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v.AddArg(v0) v0.Aux = s v1 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v1.AuxInt = i0 + v1.AuxInt = i v1.AddArg(p) v0.AddArg(v1) v0.AddArg(mem) return true } - // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i4] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i7] {s} p mem))) - // cond: i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload {s} (OffPtr [i0] p) mem) + // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUload [i] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i+4] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i+5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i+6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i+7] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) + // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload {s} (OffPtr [i] p) mem) for { t := v.Type if v.AuxInt != 56 { @@ -8018,7 +7437,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x0.Op != OpARM64MOVWUload { break } - i0 := x0.AuxInt + i := x0.AuxInt s := x0.Aux p := x0.Args[0] mem := x0.Args[1] @@ -8030,7 +7449,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - i4 := x1.AuxInt + if x1.AuxInt != i+4 { + break + } if x1.Aux != s { break } @@ -8048,7 +7469,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x2.Op != OpARM64MOVBUload { break } - i5 := x2.AuxInt + if x2.AuxInt != i+5 { + break + } if x2.Aux != s { break } @@ -8066,7 +7489,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x3.Op != OpARM64MOVBUload { break } - i6 := x3.AuxInt + if x3.AuxInt != i+6 { + break + } if x3.Aux != s { break } @@ -8084,7 
+7509,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x4.Op != OpARM64MOVBUload { break } - i7 := x4.AuxInt + if x4.AuxInt != i+7 { + break + } if x4.Aux != s { break } @@ -8094,7 +7521,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if mem != x4.Args[1] { break } - if !(i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { break } b = mergePoint(b, x0, x1, x2, x3, x4) @@ -8103,15 +7530,15 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v.AddArg(v0) v0.Aux = s v1 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v1.AuxInt = i0 + v1.AuxInt = i v1.AddArg(p) v0.AddArg(v1) v0.AddArg(mem) return true } - // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem))) - // cond: i1 == i0+1 && (i0%2 == 0 || i0<256 && i0>-256 && !isArg(s) && !isAuto(s)) && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) - // result: @mergePoint(b,x0,x1) (REV16W (MOVHUload [i0] {s} p mem)) + // match: (ORshiftLL [8] y0:(MOVDnop x0:(MOVBUload [i] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem))) + // cond: ((i-1)%2 == 0 || i-1<256 && i-1>-256 && !isArg(s) && !isAuto(s)) && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1) + // result: @mergePoint(b,x0,x1) (REV16W (MOVHUload [i-1] {s} p mem)) for { t := v.Type if v.AuxInt != 8 { @@ -8125,7 +7552,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x0.Op != OpARM64MOVBUload { break } - i1 := x0.AuxInt + i := x0.AuxInt s := x0.Aux p := x0.Args[0] mem := x0.Args[1] @@ -8137,7 +7564,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - i0 := x1.AuxInt + if x1.AuxInt != i-1 { + break + } if x1.Aux != s { break } @@ -8147,7 +7576,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if mem != x1.Args[1] { break } - if !(i1 == i0+1 && (i0%2 == 0 || i0 < 256 && i0 > -256 && !isArg(s) && !isAuto(s)) && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { + if !(((i-1)%2 == 0 || i-1 < 256 && i-1 > -256 && !isArg(s) && !isAuto(s)) && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(y0) && clobber(y1)) { break } b = mergePoint(b, x0, x1) @@ -8155,16 +7584,16 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v.reset(OpCopy) v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64MOVHUload, t) - v1.AuxInt = i0 + 
v1.AuxInt = i - 1 v1.Aux = s v1.AddArg(p) v1.AddArg(mem) v0.AddArg(v1) return true } - // match: (ORshiftLL [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUload [i2] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i0] {s} p mem))) - // cond: i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0) - // result: @mergePoint(b,x0,x1,x2) (REVW (MOVWUload {s} (OffPtr [i0] p) mem)) + // match: (ORshiftLL [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUload [i] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i-2] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (REVW (MOVWUload {s} (OffPtr [i-2] p) mem)) for { t := v.Type if v.AuxInt != 24 { @@ -8185,7 +7614,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x0.Op != OpARM64MOVHUload { break } - i2 := x0.AuxInt + i := x0.AuxInt s := x0.Aux p := x0.Args[0] mem := x0.Args[1] @@ -8197,7 +7626,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - i1 := x1.AuxInt + if x1.AuxInt != i-1 { + break + } if x1.Aux != s { break } @@ -8215,7 +7646,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x2.Op != OpARM64MOVBUload { break } - i0 := x2.AuxInt + if x2.AuxInt != i-2 { + break + } if x2.Aux != s { break } @@ -8225,7 +7658,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if mem != x2.Args[1] { break } - if !(i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0)) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(o0)) { break } b = mergePoint(b, x0, x1, x2) @@ -8235,16 +7668,16 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v1 := b.NewValue0(v.Pos, OpARM64MOVWUload, t) v1.Aux = s v2 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v2.AuxInt = i0 + v2.AuxInt = i - 2 v2.AddArg(p) v1.AddArg(v2) v1.AddArg(mem) v0.AddArg(v1) return true } - // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUload [i4] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i3] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i1] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i0] {s} p mem))) - // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && 
clobber(o2) - // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV (MOVDload {s} (OffPtr [i0] p) mem)) + // match: (ORshiftLL [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUload [i] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i-1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i-2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i-3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i-4] {s} p mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2) + // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV (MOVDload {s} (OffPtr [i-4] p) mem)) for { t := v.Type if v.AuxInt != 56 { @@ -8279,7 +7712,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x0.Op != OpARM64MOVWUload { break } - i4 := x0.AuxInt + i := x0.AuxInt s := x0.Aux p := x0.Args[0] mem := x0.Args[1] @@ -8291,7 +7724,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x1.Op != OpARM64MOVBUload { break } - i3 := x1.AuxInt + if x1.AuxInt != i-1 { + break + } if x1.Aux != s { break } @@ -8309,7 +7744,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x2.Op != OpARM64MOVBUload { break } - i2 := x2.AuxInt + if x2.AuxInt != i-2 { + break + } if x2.Aux != s { break } @@ -8327,7 +7764,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x3.Op != OpARM64MOVBUload { break } - i1 := x3.AuxInt + if x3.AuxInt != i-3 { + break + } if x3.Aux != s { break } @@ -8345,7 +7784,9 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if x4.Op != OpARM64MOVBUload { break } - i0 := x4.AuxInt + if x4.AuxInt != i-4 { + break + } if x4.Aux != s { break } @@ -8355,7 +7796,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { if mem != x4.Args[1] { break } - if !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(y0) && clobber(y1) && clobber(y2) && clobber(y3) && clobber(y4) && clobber(o0) && clobber(o1) && clobber(o2)) { break } b = mergePoint(b, x0, x1, x2, x3, x4) @@ -8365,7 +7806,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { v1 := b.NewValue0(v.Pos, OpARM64MOVDload, t) v1.Aux = s v2 := b.NewValue0(v.Pos, OpOffPtr, p.Type) - v2.AuxInt = i0 + v2.AuxInt = i - 4 v2.AddArg(p) v1.AddArg(v2) v1.AddArg(mem) @@ -8377,7 +7818,7 @@ func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool { func rewriteValueARM64_OpARM64ORshiftRA(v *Value) bool { b := v.Block _ = b - // match: (ORshiftRA (MOVDconst [c]) x [d]) + // match: 
(ORshiftRA (MOVDconst [c]) x [d]) // cond: // result: (ORconst [c] (SRAconst x [d])) for { @@ -8396,7 +7837,7 @@ func rewriteValueARM64_OpARM64ORshiftRA(v *Value) bool { v.AddArg(v0) return true } - // match: (ORshiftRA x (MOVDconst [c]) [d]) + // match: (ORshiftRA x (MOVDconst [c]) [d]) // cond: // result: (ORconst x [int64(int64(c)>>uint64(d))]) for { @@ -8412,7 +7853,7 @@ func rewriteValueARM64_OpARM64ORshiftRA(v *Value) bool { v.AddArg(x) return true } - // match: (ORshiftRA x y:(SRAconst x [c]) [d]) + // match: (ORshiftRA x y:(SRAconst x [c]) [d]) // cond: c==d // result: y for { @@ -8439,7 +7880,7 @@ func rewriteValueARM64_OpARM64ORshiftRA(v *Value) bool { func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool { b := v.Block _ = b - // match: (ORshiftRL (MOVDconst [c]) x [d]) + // match: (ORshiftRL (MOVDconst [c]) x [d]) // cond: // result: (ORconst [c] (SRLconst x [d])) for { @@ -8458,7 +7899,7 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool { v.AddArg(v0) return true } - // match: (ORshiftRL x (MOVDconst [c]) [d]) + // match: (ORshiftRL x (MOVDconst [c]) [d]) // cond: // result: (ORconst x [int64(uint64(c)>>uint64(d))]) for { @@ -8474,7 +7915,7 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool { v.AddArg(x) return true } - // match: (ORshiftRL x y:(SRLconst x [c]) [d]) + // match: (ORshiftRL x y:(SRLconst x [c]) [d]) // cond: c==d // result: y for { @@ -8496,7 +7937,7 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool { v.AddArg(y) return true } - // match: (ORshiftRL [c] (SLLconst x [64-c]) x) + // match: ( ORshiftRL [c] (SLLconst x [64-c]) x) // cond: // result: (RORconst [ c] x) for { @@ -8517,7 +7958,7 @@ func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool { v.AddArg(x) return true } - // match: (ORshiftRL [c] (SLLconst x [32-c]) (MOVWUreg x)) + // match: ( ORshiftRL [c] (SLLconst x [32-c]) (MOVWUreg x)) // cond: c < 32 && t.Size() == 4 // result: (RORWconst [ c] x) for { @@ -8775,7 +8216,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { return false } func rewriteValueARM64_OpARM64SUBconst(v *Value) bool { - // match: (SUBconst [0] x) + // match: (SUBconst [0] x) // cond: // result: x for { @@ -8992,7 +8433,7 @@ func rewriteValueARM64_OpARM64UDIV(v *Value) bool { v.AddArg(x) return true } - // match: (UDIV (MOVDconst [c]) (MOVDconst [d])) + // match: (UDIV (MOVDconst [c]) (MOVDconst [d])) // cond: // result: (MOVDconst [int64(uint64(c)/uint64(d))]) for { @@ -9103,7 +8544,7 @@ func rewriteValueARM64_OpARM64UMOD(v *Value) bool { v.AddArg(x) return true } - // match: (UMOD (MOVDconst [c]) (MOVDconst [d])) + // match: (UMOD (MOVDconst [c]) (MOVDconst [d])) // cond: // result: (MOVDconst [int64(uint64(c)%uint64(d))]) for { @@ -9179,31 +8620,31 @@ func rewriteValueARM64_OpARM64UMODW(v *Value) bool { return false } func rewriteValueARM64_OpARM64XOR(v *Value) bool { - // match: (XOR x (MOVDconst [c])) + // match: (XOR (MOVDconst [c]) x) // cond: // result: (XORconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { + v_0 := v.Args[0] + if v_0.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := v.Args[1] v.reset(OpARM64XORconst) v.AuxInt = c v.AddArg(x) return true } - // match: (XOR (MOVDconst [c]) x) + // match: (XOR x (MOVDconst [c])) // cond: // result: (XORconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpARM64MOVDconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt v.reset(OpARM64XORconst) v.AuxInt = 
c v.AddArg(x) @@ -9326,7 +8767,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { return false } func rewriteValueARM64_OpARM64XORconst(v *Value) bool { - // match: (XORconst [0] x) + // match: (XORconst [0] x) // cond: // result: x for { @@ -9945,7 +9386,7 @@ func rewriteValueARM64_OpAtomicExchange64(v *Value) bool { } } func rewriteValueARM64_OpAtomicLoad32(v *Value) bool { - // match: (AtomicLoad32 ptr mem) + // match: (AtomicLoad32 ptr mem) // cond: // result: (LDARW ptr mem) for { @@ -9958,7 +9399,7 @@ func rewriteValueARM64_OpAtomicLoad32(v *Value) bool { } } func rewriteValueARM64_OpAtomicLoad64(v *Value) bool { - // match: (AtomicLoad64 ptr mem) + // match: (AtomicLoad64 ptr mem) // cond: // result: (LDAR ptr mem) for { @@ -9984,7 +9425,7 @@ func rewriteValueARM64_OpAtomicLoadPtr(v *Value) bool { } } func rewriteValueARM64_OpAtomicOr8(v *Value) bool { - // match: (AtomicOr8 ptr val mem) + // match: (AtomicOr8 ptr val mem) // cond: // result: (LoweredAtomicOr8 ptr val mem) for { @@ -9999,7 +9440,7 @@ func rewriteValueARM64_OpAtomicOr8(v *Value) bool { } } func rewriteValueARM64_OpAtomicStore32(v *Value) bool { - // match: (AtomicStore32 ptr val mem) + // match: (AtomicStore32 ptr val mem) // cond: // result: (STLRW ptr val mem) for { @@ -10014,7 +9455,7 @@ func rewriteValueARM64_OpAtomicStore32(v *Value) bool { } } func rewriteValueARM64_OpAtomicStore64(v *Value) bool { - // match: (AtomicStore64 ptr val mem) + // match: (AtomicStore64 ptr val mem) // cond: // result: (STLR ptr val mem) for { @@ -12005,7 +11446,7 @@ func rewriteValueARM64_OpLsh16x32(v *Value) bool { func rewriteValueARM64_OpLsh16x64(v *Value) bool { b := v.Block _ = b - // match: (Lsh16x64 x (MOVDconst [c])) + // match: (Lsh16x64 x (MOVDconst [c])) // cond: uint64(c) < 16 // result: (SLLconst x [c]) for { @@ -12023,7 +11464,7 @@ func rewriteValueARM64_OpLsh16x64(v *Value) bool { v.AddArg(x) return true } - // match: (Lsh16x64 _ (MOVDconst [c])) + // match: (Lsh16x64 _ (MOVDconst [c])) // cond: uint64(c) >= 16 // result: (MOVDconst [0]) for { @@ -12066,7 +11507,7 @@ func rewriteValueARM64_OpLsh16x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh16x8 x y) + // match: (Lsh16x8 x y) // cond: // result: (CSELULT (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { @@ -12157,7 +11598,7 @@ func rewriteValueARM64_OpLsh32x32(v *Value) bool { func rewriteValueARM64_OpLsh32x64(v *Value) bool { b := v.Block _ = b - // match: (Lsh32x64 x (MOVDconst [c])) + // match: (Lsh32x64 x (MOVDconst [c])) // cond: uint64(c) < 32 // result: (SLLconst x [c]) for { @@ -12175,7 +11616,7 @@ func rewriteValueARM64_OpLsh32x64(v *Value) bool { v.AddArg(x) return true } - // match: (Lsh32x64 _ (MOVDconst [c])) + // match: (Lsh32x64 _ (MOVDconst [c])) // cond: uint64(c) >= 32 // result: (MOVDconst [0]) for { @@ -12218,7 +11659,7 @@ func rewriteValueARM64_OpLsh32x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh32x8 x y) + // match: (Lsh32x8 x y) // cond: // result: (CSELULT (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { @@ -12309,7 +11750,7 @@ func rewriteValueARM64_OpLsh64x32(v *Value) bool { func rewriteValueARM64_OpLsh64x64(v *Value) bool { b := v.Block _ = b - // match: (Lsh64x64 x (MOVDconst [c])) + // match: (Lsh64x64 x (MOVDconst [c])) // cond: uint64(c) < 64 // result: (SLLconst x [c]) for { @@ -12327,7 +11768,7 @@ func rewriteValueARM64_OpLsh64x64(v *Value) bool { v.AddArg(x) return true } - // match: (Lsh64x64 _ 
(MOVDconst [c])) + // match: (Lsh64x64 _ (MOVDconst [c])) // cond: uint64(c) >= 64 // result: (MOVDconst [0]) for { @@ -12370,7 +11811,7 @@ func rewriteValueARM64_OpLsh64x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh64x8 x y) + // match: (Lsh64x8 x y) // cond: // result: (CSELULT (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { @@ -12461,7 +11902,7 @@ func rewriteValueARM64_OpLsh8x32(v *Value) bool { func rewriteValueARM64_OpLsh8x64(v *Value) bool { b := v.Block _ = b - // match: (Lsh8x64 x (MOVDconst [c])) + // match: (Lsh8x64 x (MOVDconst [c])) // cond: uint64(c) < 8 // result: (SLLconst x [c]) for { @@ -12479,7 +11920,7 @@ func rewriteValueARM64_OpLsh8x64(v *Value) bool { v.AddArg(x) return true } - // match: (Lsh8x64 _ (MOVDconst [c])) + // match: (Lsh8x64 _ (MOVDconst [c])) // cond: uint64(c) >= 8 // result: (MOVDconst [0]) for { @@ -12522,7 +11963,7 @@ func rewriteValueARM64_OpLsh8x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh8x8 x y) + // match: (Lsh8x8 x y) // cond: // result: (CSELULT (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { @@ -13635,7 +13076,7 @@ func rewriteValueARM64_OpRsh16Ux8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh16Ux8 x y) + // match: (Rsh16Ux8 x y) // cond: // result: (CSELULT (SRL (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { @@ -13732,7 +13173,7 @@ func rewriteValueARM64_OpRsh16x64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh16x64 x (MOVDconst [c])) + // match: (Rsh16x64 x (MOVDconst [c])) // cond: uint64(c) < 16 // result: (SRAconst (SignExt16to64 x) [c]) for { @@ -13800,7 +13241,7 @@ func rewriteValueARM64_OpRsh16x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh16x8 x y) + // match: (Rsh16x8 x y) // cond: // result: (SRA (SignExt16to64 x) (CSELULT (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { @@ -13963,7 +13404,7 @@ func rewriteValueARM64_OpRsh32Ux8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh32Ux8 x y) + // match: (Rsh32Ux8 x y) // cond: // result: (CSELULT (SRL (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { @@ -14060,7 +13501,7 @@ func rewriteValueARM64_OpRsh32x64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh32x64 x (MOVDconst [c])) + // match: (Rsh32x64 x (MOVDconst [c])) // cond: uint64(c) < 32 // result: (SRAconst (SignExt32to64 x) [c]) for { @@ -14128,7 +13569,7 @@ func rewriteValueARM64_OpRsh32x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh32x8 x y) + // match: (Rsh32x8 x y) // cond: // result: (SRA (SignExt32to64 x) (CSELULT (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { @@ -14281,7 +13722,7 @@ func rewriteValueARM64_OpRsh64Ux8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh64Ux8 x y) + // match: (Rsh64Ux8 x y) // cond: // result: (CSELULT (SRL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { @@ -14370,7 +13811,7 @@ func rewriteValueARM64_OpRsh64x32(v *Value) bool { func rewriteValueARM64_OpRsh64x64(v *Value) bool { b := v.Block _ = b - // match: (Rsh64x64 x (MOVDconst [c])) + // match: (Rsh64x64 x (MOVDconst [c])) // cond: uint64(c) < 64 // result: (SRAconst x [c]) for { @@ -14432,7 +13873,7 @@ func 
rewriteValueARM64_OpRsh64x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh64x8 x y) + // match: (Rsh64x8 x y) // cond: // result: (SRA x (CSELULT (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { @@ -14528,7 +13969,7 @@ func rewriteValueARM64_OpRsh8Ux64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh8Ux64 x (MOVDconst [c])) + // match: (Rsh8Ux64 x (MOVDconst [c])) // cond: uint64(c) < 8 // result: (SRLconst (ZeroExt8to64 x) [c]) for { @@ -14548,7 +13989,7 @@ func rewriteValueARM64_OpRsh8Ux64(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh8Ux64 _ (MOVDconst [c])) + // match: (Rsh8Ux64 _ (MOVDconst [c])) // cond: uint64(c) >= 8 // result: (MOVDconst [0]) for { @@ -14593,7 +14034,7 @@ func rewriteValueARM64_OpRsh8Ux8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh8Ux8 x y) + // match: (Rsh8Ux8 x y) // cond: // result: (CSELULT (SRL (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) for { @@ -14690,7 +14131,7 @@ func rewriteValueARM64_OpRsh8x64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh8x64 x (MOVDconst [c])) + // match: (Rsh8x64 x (MOVDconst [c])) // cond: uint64(c) < 8 // result: (SRAconst (SignExt8to64 x) [c]) for { @@ -14710,7 +14151,7 @@ func rewriteValueARM64_OpRsh8x64(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh8x64 x (MOVDconst [c])) + // match: (Rsh8x64 x (MOVDconst [c])) // cond: uint64(c) >= 8 // result: (SRAconst (SignExt8to64 x) [63]) for { @@ -14758,7 +14199,7 @@ func rewriteValueARM64_OpRsh8x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh8x8 x y) + // match: (Rsh8x8 x y) // cond: // result: (SRA (SignExt8to64 x) (CSELULT (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go index 1045a4e5d1..19144108e7 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go @@ -686,7 +686,7 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool { _ = config types := &b.Func.Config.Types _ = types - // match: (AtomicAnd8 ptr val mem) + // match: (AtomicAnd8 ptr val mem) // cond: !config.BigEndian // result: (LoweredAtomicAnd (AND (MOVWconst [^3]) ptr) (OR (SLL (ZeroExt8to32 val) (SLLconst [3] (ANDconst [3] ptr))) (NORconst [0] (SLL (MOVWconst [0xff]) (SLLconst [3] (ANDconst [3] ptr))))) mem) for { @@ -735,7 +735,7 @@ func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool { v.AddArg(mem) return true } - // match: (AtomicAnd8 ptr val mem) + // match: (AtomicAnd8 ptr val mem) // cond: config.BigEndian // result: (LoweredAtomicAnd (AND (MOVWconst [^3]) ptr) (OR (SLL (ZeroExt8to32 val) (SLLconst [3] (ANDconst [3] (XORconst [3] ptr)))) (NORconst [0] (SLL (MOVWconst [0xff]) (SLLconst [3] (ANDconst [3] (XORconst [3] ptr)))))) mem) for { @@ -825,7 +825,7 @@ func rewriteValueMIPS_OpAtomicExchange32(v *Value) bool { } } func rewriteValueMIPS_OpAtomicLoad32(v *Value) bool { - // match: (AtomicLoad32 ptr mem) + // match: (AtomicLoad32 ptr mem) // cond: // result: (LoweredAtomicLoad ptr mem) for { @@ -927,7 +927,7 @@ func rewriteValueMIPS_OpAtomicOr8(v *Value) bool { return false } func rewriteValueMIPS_OpAtomicStore32(v *Value) bool { - // match: (AtomicStore32 ptr val mem) + // match: (AtomicStore32 ptr val mem) // cond: // result: (LoweredAtomicStore ptr val mem) for { @@ -2777,31 +2777,31 @@ func 
rewriteValueMIPS_OpLsh8x8(v *Value) bool { } } func rewriteValueMIPS_OpMIPSADD(v *Value) bool { - // match: (ADD x (MOVWconst [c])) + // match: (ADD (MOVWconst [c]) x) // cond: // result: (ADDconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst { + v_0 := v.Args[0] + if v_0.Op != OpMIPSMOVWconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := v.Args[1] v.reset(OpMIPSADDconst) v.AuxInt = c v.AddArg(x) return true } - // match: (ADD (MOVWconst [c]) x) + // match: (ADD x (MOVWconst [c])) // cond: // result: (ADDconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMOVWconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpMIPSMOVWconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt v.reset(OpMIPSADDconst) v.AuxInt = c v.AddArg(x) @@ -2858,7 +2858,7 @@ func rewriteValueMIPS_OpMIPSADDconst(v *Value) bool { v.AddArg(ptr) return true } - // match: (ADDconst [0] x) + // match: (ADDconst [0] x) // cond: // result: x for { @@ -2922,31 +2922,31 @@ func rewriteValueMIPS_OpMIPSADDconst(v *Value) bool { func rewriteValueMIPS_OpMIPSAND(v *Value) bool { b := v.Block _ = b - // match: (AND x (MOVWconst [c])) + // match: (AND (MOVWconst [c]) x) // cond: // result: (ANDconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst { + v_0 := v.Args[0] + if v_0.Op != OpMIPSMOVWconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := v.Args[1] v.reset(OpMIPSANDconst) v.AuxInt = c v.AddArg(x) return true } - // match: (AND (MOVWconst [c]) x) + // match: (AND x (MOVWconst [c])) // cond: // result: (ANDconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMOVWconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpMIPSMOVWconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt v.reset(OpMIPSANDconst) v.AuxInt = c v.AddArg(x) @@ -2993,38 +2993,10 @@ func rewriteValueMIPS_OpMIPSAND(v *Value) bool { v.AddArg(v0) return true } - // match: (AND (SGTUconst [1] y) (SGTUconst [1] x)) - // cond: - // result: (SGTUconst [1] (OR x y)) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSSGTUconst { - break - } - if v_0.AuxInt != 1 { - break - } - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSSGTUconst { - break - } - if v_1.AuxInt != 1 { - break - } - x := v_1.Args[0] - v.reset(OpMIPSSGTUconst) - v.AuxInt = 1 - v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true - } return false } func rewriteValueMIPS_OpMIPSANDconst(v *Value) bool { - // match: (ANDconst [0] _) + // match: (ANDconst [0] _) // cond: // result: (MOVWconst [0]) for { @@ -3375,7 +3347,7 @@ func rewriteValueMIPS_OpMIPSMOVBUreg(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVBload(v *Value) bool { - // match: (MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem) + // match: (MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVBload [off1+off2] {sym} ptr mem) for { @@ -3518,7 +3490,7 @@ func rewriteValueMIPS_OpMIPSMOVBreg(v *Value) bool { v.AddArg(x) return true } - // match: (MOVBreg (MOVWconst [c])) + // match: (MOVBreg (MOVWconst [c])) // cond: // result: (MOVWconst [int64(int8(c))]) for { @@ -3765,7 +3737,7 @@ func rewriteValueMIPS_OpMIPSMOVBstorezero(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVDload(v *Value) bool { - // match: (MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem) + // match: (MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: 
(MOVDload [off1+off2] {sym} ptr mem) for { @@ -3892,7 +3864,7 @@ func rewriteValueMIPS_OpMIPSMOVDstore(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVFload(v *Value) bool { - // match: (MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem) + // match: (MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVFload [off1+off2] {sym} ptr mem) for { @@ -4199,7 +4171,7 @@ func rewriteValueMIPS_OpMIPSMOVHUreg(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVHload(v *Value) bool { - // match: (MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem) + // match: (MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVHload [off1+off2] {sym} ptr mem) for { @@ -4390,7 +4362,7 @@ func rewriteValueMIPS_OpMIPSMOVHreg(v *Value) bool { v.AddArg(x) return true } - // match: (MOVHreg (MOVWconst [c])) + // match: (MOVHreg (MOVWconst [c])) // cond: // result: (MOVWconst [int64(int16(c))]) for { @@ -4595,7 +4567,7 @@ func rewriteValueMIPS_OpMIPSMOVHstorezero(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMOVWload(v *Value) bool { - // match: (MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem) + // match: (MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem) // cond: (is16Bit(off1+off2) || x.Uses == 1) // result: (MOVWload [off1+off2] {sym} ptr mem) for { @@ -4680,7 +4652,7 @@ func rewriteValueMIPS_OpMIPSMOVWreg(v *Value) bool { v.AddArg(x) return true } - // match: (MOVWreg (MOVWconst [c])) + // match: (MOVWreg (MOVWconst [c])) // cond: // result: (MOVWconst [c]) for { @@ -4843,7 +4815,7 @@ func rewriteValueMIPS_OpMIPSMOVWstorezero(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSMUL(v *Value) bool { - // match: (MUL (MOVWconst [0]) _) + // match: (MUL (MOVWconst [0]) _ ) // cond: // result: (MOVWconst [0]) for { @@ -4858,22 +4830,7 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool { v.AuxInt = 0 return true } - // match: (MUL _ (MOVWconst [0])) - // cond: - // result: (MOVWconst [0]) - for { - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpMIPSMOVWconst) - v.AuxInt = 0 - return true - } - // match: (MUL (MOVWconst [1]) x) + // match: (MUL (MOVWconst [1]) x ) // cond: // result: x for { @@ -4890,24 +4847,7 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool { v.AddArg(x) return true } - // match: (MUL x (MOVWconst [1])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst { - break - } - if v_1.AuxInt != 1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MUL (MOVWconst [-1]) x) + // match: (MUL (MOVWconst [-1]) x ) // cond: // result: (NEG x) for { @@ -4923,23 +4863,7 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool { v.AddArg(x) return true } - // match: (MUL x (MOVWconst [-1])) - // cond: - // result: (NEG x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst { - break - } - if v_1.AuxInt != -1 { - break - } - v.reset(OpMIPSNEG) - v.AddArg(x) - return true - } - // match: (MUL (MOVWconst [c]) x) + // match: (MUL (MOVWconst [c]) x ) // cond: isPowerOfTwo(int64(uint32(c))) // result: (SLLconst [log2(int64(uint32(c)))] x) for { @@ -4957,24 +4881,6 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool { v.AddArg(x) return true } - // match: (MUL x (MOVWconst [c])) - // cond: isPowerOfTwo(int64(uint32(c))) - // result: (SLLconst [log2(int64(uint32(c)))] x) - for { - x := v.Args[0] - v_1 := 
v.Args[1] - if v_1.Op != OpMIPSMOVWconst { - break - } - c := v_1.AuxInt - if !(isPowerOfTwo(int64(uint32(c)))) { - break - } - v.reset(OpMIPSSLLconst) - v.AuxInt = log2(int64(uint32(c))) - v.AddArg(x) - return true - } // match: (MUL (MOVWconst [c]) (MOVWconst [d])) // cond: // result: (MOVWconst [int64(int32(c)*int32(d))]) @@ -4993,24 +4899,6 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool { v.AuxInt = int64(int32(c) * int32(d)) return true } - // match: (MUL (MOVWconst [d]) (MOVWconst [c])) - // cond: - // result: (MOVWconst [int64(int32(c)*int32(d))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMOVWconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst { - break - } - c := v_1.AuxInt - v.reset(OpMIPSMOVWconst) - v.AuxInt = int64(int32(c) * int32(d)) - return true - } return false } func rewriteValueMIPS_OpMIPSNEG(v *Value) bool { @@ -5030,31 +4918,31 @@ func rewriteValueMIPS_OpMIPSNEG(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSNOR(v *Value) bool { - // match: (NOR x (MOVWconst [c])) + // match: (NOR (MOVWconst [c]) x) // cond: // result: (NORconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst { + v_0 := v.Args[0] + if v_0.Op != OpMIPSMOVWconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := v.Args[1] v.reset(OpMIPSNORconst) v.AuxInt = c v.AddArg(x) return true } - // match: (NOR (MOVWconst [c]) x) + // match: (NOR x (MOVWconst [c])) // cond: // result: (NORconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMOVWconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpMIPSMOVWconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt v.reset(OpMIPSNORconst) v.AuxInt = c v.AddArg(x) @@ -5082,37 +4970,37 @@ func rewriteValueMIPS_OpMIPSNORconst(v *Value) bool { func rewriteValueMIPS_OpMIPSOR(v *Value) bool { b := v.Block _ = b - // match: (OR x (MOVWconst [c])) + // match: (OR (MOVWconst [c]) x) // cond: // result: (ORconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst { + v_0 := v.Args[0] + if v_0.Op != OpMIPSMOVWconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := v.Args[1] v.reset(OpMIPSORconst) v.AuxInt = c v.AddArg(x) return true } - // match: (OR (MOVWconst [c]) x) + // match: (OR x (MOVWconst [c])) // cond: // result: (ORconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMOVWconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpMIPSMOVWconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt v.reset(OpMIPSORconst) v.AuxInt = c v.AddArg(x) return true } - // match: (OR x x) + // match: (OR x x) // cond: // result: x for { @@ -5146,31 +5034,10 @@ func rewriteValueMIPS_OpMIPSOR(v *Value) bool { v.AddArg(v0) return true } - // match: (OR (SGTUzero y) (SGTUzero x)) - // cond: - // result: (SGTUzero (OR x y)) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSSGTUzero { - break - } - y := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSSGTUzero { - break - } - x := v_1.Args[0] - v.reset(OpMIPSSGTUzero) - v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true - } return false } func rewriteValueMIPS_OpMIPSORconst(v *Value) bool { - // match: (ORconst [0] x) + // match: (ORconst [0] x) // cond: // result: x for { @@ -5183,7 +5050,7 @@ func rewriteValueMIPS_OpMIPSORconst(v *Value) bool { v.AddArg(x) return true } - // match: (ORconst [-1] _) + // match: (ORconst [-1] _) // cond: // result: (MOVWconst [-1]) for { @@ -5227,7 +5094,7 @@ func 
rewriteValueMIPS_OpMIPSORconst(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSSGT(v *Value) bool { - // match: (SGT (MOVWconst [c]) x) + // match: (SGT (MOVWconst [c]) x) // cond: // result: (SGTconst [c] x) for { @@ -5868,7 +5735,7 @@ func rewriteValueMIPS_OpMIPSSUB(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSSUBconst(v *Value) bool { - // match: (SUBconst [0] x) + // match: (SUBconst [0] x) // cond: // result: x for { @@ -5930,31 +5797,31 @@ func rewriteValueMIPS_OpMIPSSUBconst(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSXOR(v *Value) bool { - // match: (XOR x (MOVWconst [c])) + // match: (XOR (MOVWconst [c]) x) // cond: // result: (XORconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPSMOVWconst { + v_0 := v.Args[0] + if v_0.Op != OpMIPSMOVWconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := v.Args[1] v.reset(OpMIPSXORconst) v.AuxInt = c v.AddArg(x) return true } - // match: (XOR (MOVWconst [c]) x) + // match: (XOR x (MOVWconst [c])) // cond: // result: (XORconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMOVWconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpMIPSMOVWconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt v.reset(OpMIPSXORconst) v.AuxInt = c v.AddArg(x) @@ -5975,7 +5842,7 @@ func rewriteValueMIPS_OpMIPSXOR(v *Value) bool { return false } func rewriteValueMIPS_OpMIPSXORconst(v *Value) bool { - // match: (XORconst [0] x) + // match: (XORconst [0] x) // cond: // result: x for { @@ -7910,33 +7777,7 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool { v.AddArg(v0) return true } - // match: (Select0 (MULTU (MOVWconst [c]) x)) - // cond: x.Op != OpMIPSMOVWconst - // result: (Select0 (MULTU (MOVWconst [c]) x )) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPSMOVWconst { - break - } - c := v_0_0.AuxInt - x := v_0.Args[1] - if !(x.Op != OpMIPSMOVWconst) { - break - } - v.reset(OpSelect0) - v0 := b.NewValue0(v.Pos, OpMIPSMULTU, MakeTuple(types.UInt32, types.UInt32)) - v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32) - v1.AuxInt = c - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Select0 (MULTU (MOVWconst [0]) _)) + // match: (Select0 (MULTU (MOVWconst [0]) _ )) // cond: // result: (MOVWconst [0]) for { @@ -7955,26 +7796,7 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool { v.AuxInt = 0 return true } - // match: (Select0 (MULTU _ (MOVWconst [0]))) - // cond: - // result: (MOVWconst [0]) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst { - break - } - if v_0_1.AuxInt != 0 { - break - } - v.reset(OpMIPSMOVWconst) - v.AuxInt = 0 - return true - } - // match: (Select0 (MULTU (MOVWconst [1]) _)) + // match: (Select0 (MULTU (MOVWconst [1]) _ )) // cond: // result: (MOVWconst [0]) for { @@ -7993,26 +7815,7 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool { v.AuxInt = 0 return true } - // match: (Select0 (MULTU _ (MOVWconst [1]))) - // cond: - // result: (MOVWconst [0]) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst { - break - } - if v_0_1.AuxInt != 1 { - break - } - v.reset(OpMIPSMOVWconst) - v.AuxInt = 0 - return true - } - // match: (Select0 (MULTU (MOVWconst [-1]) x)) + // match: (Select0 (MULTU (MOVWconst [-1]) x )) // cond: // result: (CMOVZ (ADDconst [-1] x) (MOVWconst [0]) x) for { @@ -8039,34 +7842,7 @@ func 
rewriteValueMIPS_OpSelect0(v *Value) bool { v.AddArg(x) return true } - // match: (Select0 (MULTU x (MOVWconst [-1]))) - // cond: - // result: (CMOVZ (ADDconst [-1] x) (MOVWconst [0]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst { - break - } - if v_0_1.AuxInt != -1 { - break - } - v.reset(OpMIPSCMOVZ) - v0 := b.NewValue0(v.Pos, OpMIPSADDconst, x.Type) - v0.AuxInt = -1 - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32) - v1.AuxInt = 0 - v.AddArg(v1) - v.AddArg(x) - return true - } - // match: (Select0 (MULTU (MOVWconst [c]) x)) + // match: (Select0 (MULTU (MOVWconst [c]) x )) // cond: isPowerOfTwo(int64(uint32(c))) // result: (SRLconst [32-log2(int64(uint32(c)))] x) for { @@ -8088,29 +7864,7 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool { v.AddArg(x) return true } - // match: (Select0 (MULTU x (MOVWconst [c]))) - // cond: isPowerOfTwo(int64(uint32(c))) - // result: (SRLconst [32-log2(int64(uint32(c)))] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst { - break - } - c := v_0_1.AuxInt - if !(isPowerOfTwo(int64(uint32(c)))) { - break - } - v.reset(OpMIPSSRLconst) - v.AuxInt = 32 - log2(int64(uint32(c))) - v.AddArg(x) - return true - } - // match: (Select0 (MULTU (MOVWconst [c]) (MOVWconst [d]))) + // match: (Select0 (MULTU (MOVWconst [c]) (MOVWconst [d]))) // cond: // result: (MOVWconst [(c*d)>>32]) for { @@ -8132,29 +7886,7 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool { v.AuxInt = (c * d) >> 32 return true } - // match: (Select0 (MULTU (MOVWconst [d]) (MOVWconst [c]))) - // cond: - // result: (MOVWconst [(c*d)>>32]) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPSMOVWconst { - break - } - d := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst { - break - } - c := v_0_1.AuxInt - v.reset(OpMIPSMOVWconst) - v.AuxInt = (c * d) >> 32 - return true - } - // match: (Select0 (DIV (MOVWconst [c]) (MOVWconst [d]))) + // match: (Select0 (DIV (MOVWconst [c]) (MOVWconst [d]))) // cond: // result: (MOVWconst [int64(int32(c)%int32(d))]) for { @@ -8271,33 +8003,7 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool { v.AddArg(v0) return true } - // match: (Select1 (MULTU (MOVWconst [c]) x)) - // cond: x.Op != OpMIPSMOVWconst - // result: (Select1 (MULTU (MOVWconst [c]) x )) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPSMOVWconst { - break - } - c := v_0_0.AuxInt - x := v_0.Args[1] - if !(x.Op != OpMIPSMOVWconst) { - break - } - v.reset(OpSelect1) - v0 := b.NewValue0(v.Pos, OpMIPSMULTU, MakeTuple(types.UInt32, types.UInt32)) - v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, types.UInt32) - v1.AuxInt = c - v0.AddArg(v1) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Select1 (MULTU (MOVWconst [0]) _)) + // match: (Select1 (MULTU (MOVWconst [0]) _ )) // cond: // result: (MOVWconst [0]) for { @@ -8316,26 +8022,7 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool { v.AuxInt = 0 return true } - // match: (Select1 (MULTU _ (MOVWconst [0]))) - // cond: - // result: (MOVWconst [0]) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst { - break - } - if v_0_1.AuxInt != 0 { - break - } - v.reset(OpMIPSMOVWconst) - v.AuxInt = 0 - return true - } - // 
match: (Select1 (MULTU (MOVWconst [1]) x)) + // match: (Select1 (MULTU (MOVWconst [1]) x )) // cond: // result: x for { @@ -8356,28 +8043,7 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool { v.AddArg(x) return true } - // match: (Select1 (MULTU x (MOVWconst [1]))) - // cond: - // result: x - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst { - break - } - if v_0_1.AuxInt != 1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Select1 (MULTU (MOVWconst [-1]) x)) + // match: (Select1 (MULTU (MOVWconst [-1]) x )) // cond: // result: (NEG x) for { @@ -8398,28 +8064,7 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool { v.AddArg(x) return true } - // match: (Select1 (MULTU x (MOVWconst [-1]))) - // cond: - // result: (NEG x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst { - break - } - if v_0_1.AuxInt != -1 { - break - } - v.reset(OpMIPSNEG) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Select1 (MULTU (MOVWconst [c]) x)) + // match: (Select1 (MULTU (MOVWconst [c]) x )) // cond: isPowerOfTwo(int64(uint32(c))) // result: (SLLconst [log2(int64(uint32(c)))] x) for { @@ -8441,29 +8086,7 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool { v.AddArg(x) return true } - // match: (Select1 (MULTU x (MOVWconst [c]))) - // cond: isPowerOfTwo(int64(uint32(c))) - // result: (SLLconst [log2(int64(uint32(c)))] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst { - break - } - c := v_0_1.AuxInt - if !(isPowerOfTwo(int64(uint32(c)))) { - break - } - v.reset(OpMIPSSLLconst) - v.AuxInt = log2(int64(uint32(c))) - v.AddArg(x) - return true - } - // match: (Select1 (MULTU (MOVWconst [c]) (MOVWconst [d]))) + // match: (Select1 (MULTU (MOVWconst [c]) (MOVWconst [d]))) // cond: // result: (MOVWconst [int64(int32(uint32(c)*uint32(d)))]) for { @@ -8485,29 +8108,7 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool { v.AuxInt = int64(int32(uint32(c) * uint32(d))) return true } - // match: (Select1 (MULTU (MOVWconst [d]) (MOVWconst [c]))) - // cond: - // result: (MOVWconst [int64(int32(uint32(c)*uint32(d)))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPSMULTU { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPSMOVWconst { - break - } - d := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPSMOVWconst { - break - } - c := v_0_1.AuxInt - v.reset(OpMIPSMOVWconst) - v.AuxInt = int64(int32(uint32(c) * uint32(d))) - return true - } - // match: (Select1 (DIV (MOVWconst [c]) (MOVWconst [d]))) + // match: (Select1 (DIV (MOVWconst [c]) (MOVWconst [d]))) // cond: // result: (MOVWconst [int64(int32(c)/int32(d))]) for { @@ -9581,7 +9182,7 @@ func rewriteBlockMIPS(b *Block) bool { _ = no return true } - // match: (EQ (MOVWconst [0]) yes no) + // match: (EQ (MOVWconst [0]) yes no) // cond: // result: (First nil yes no) for { @@ -9600,7 +9201,7 @@ func rewriteBlockMIPS(b *Block) bool { _ = no return true } - // match: (EQ (MOVWconst [c]) yes no) + // match: (EQ (MOVWconst [c]) yes no) // cond: c != 0 // result: (First nil no yes) for { @@ -10052,7 +9653,7 @@ func rewriteBlockMIPS(b *Block) bool { _ = no return true } - // match: (NE (MOVWconst [0]) yes no) + // match: (NE (MOVWconst [0]) yes no) // cond: // result: (First nil no yes) for { @@ -10072,7 +9673,7 @@ func rewriteBlockMIPS(b 
*Block) bool { _ = yes return true } - // match: (NE (MOVWconst [c]) yes no) + // match: (NE (MOVWconst [c]) yes no) // cond: c != 0 // result: (First nil yes no) for { diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go index 74d82003a5..e0f16a9f87 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go @@ -2692,7 +2692,7 @@ func rewriteValueMIPS64_OpLsh16x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh16x8 x y) + // match: (Lsh16x8 x y) // cond: // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) for { @@ -2816,7 +2816,7 @@ func rewriteValueMIPS64_OpLsh32x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh32x8 x y) + // match: (Lsh32x8 x y) // cond: // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) for { @@ -2940,7 +2940,7 @@ func rewriteValueMIPS64_OpLsh64x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh64x8 x y) + // match: (Lsh64x8 x y) // cond: // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) for { @@ -3064,7 +3064,7 @@ func rewriteValueMIPS64_OpLsh8x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh8x8 x y) + // match: (Lsh8x8 x y) // cond: // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) for { @@ -3092,16 +3092,16 @@ func rewriteValueMIPS64_OpLsh8x8(v *Value) bool { } } func rewriteValueMIPS64_OpMIPS64ADDV(v *Value) bool { - // match: (ADDV x (MOVVconst [c])) + // match: (ADDV (MOVVconst [c]) x) // cond: is32Bit(c) // result: (ADDVconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPS64MOVVconst { + v_0 := v.Args[0] + if v_0.Op != OpMIPS64MOVVconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := v.Args[1] if !(is32Bit(c)) { break } @@ -3110,16 +3110,16 @@ func rewriteValueMIPS64_OpMIPS64ADDV(v *Value) bool { v.AddArg(x) return true } - // match: (ADDV (MOVVconst [c]) x) + // match: (ADDV x (MOVVconst [c])) // cond: is32Bit(c) // result: (ADDVconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MOVVconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpMIPS64MOVVconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt if !(is32Bit(c)) { break } @@ -3179,7 +3179,7 @@ func rewriteValueMIPS64_OpMIPS64ADDVconst(v *Value) bool { v.AddArg(ptr) return true } - // match: (ADDVconst [0] x) + // match: (ADDVconst [0] x) // cond: // result: x for { @@ -3247,16 +3247,16 @@ func rewriteValueMIPS64_OpMIPS64ADDVconst(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64AND(v *Value) bool { - // match: (AND x (MOVVconst [c])) + // match: (AND (MOVVconst [c]) x) // cond: is32Bit(c) // result: (ANDconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPS64MOVVconst { + v_0 := v.Args[0] + if v_0.Op != OpMIPS64MOVVconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := v.Args[1] if !(is32Bit(c)) { break } @@ -3265,16 +3265,16 @@ func rewriteValueMIPS64_OpMIPS64AND(v *Value) bool { v.AddArg(x) return true } - // match: (AND (MOVVconst [c]) x) + // match: (AND x (MOVVconst [c])) // cond: is32Bit(c) // result: (ANDconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MOVVconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpMIPS64MOVVconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt if !(is32Bit(c)) { 
break } @@ -3299,7 +3299,7 @@ func rewriteValueMIPS64_OpMIPS64AND(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64ANDconst(v *Value) bool { - // match: (ANDconst [0] _) + // match: (ANDconst [0] _) // cond: // result: (MOVVconst [0]) for { @@ -3446,7 +3446,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBUreg(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVBload(v *Value) bool { - // match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) + // match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVBload [off1+off2] {sym} ptr mem) for { @@ -3520,7 +3520,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBreg(v *Value) bool { v.AddArg(x) return true } - // match: (MOVBreg (MOVVconst [c])) + // match: (MOVBreg (MOVVconst [c])) // cond: // result: (MOVVconst [int64(int8(c))]) for { @@ -3788,7 +3788,7 @@ func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value) bool { - // match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) + // match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVDload [off1+off2] {sym} ptr mem) for { @@ -3892,7 +3892,7 @@ func rewriteValueMIPS64_OpMIPS64MOVDstore(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVFload(v *Value) bool { - // match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) + // match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVFload [off1+off2] {sym} ptr mem) for { @@ -4110,7 +4110,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHUreg(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value) bool { - // match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) + // match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVHload [off1+off2] {sym} ptr mem) for { @@ -4232,7 +4232,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHreg(v *Value) bool { v.AddArg(x) return true } - // match: (MOVHreg (MOVVconst [c])) + // match: (MOVHreg (MOVVconst [c])) // cond: // result: (MOVVconst [int64(int16(c))]) for { @@ -4458,7 +4458,7 @@ func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value) bool { - // match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) + // match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVVload [off1+off2] {sym} ptr mem) for { @@ -4520,7 +4520,7 @@ func rewriteValueMIPS64_OpMIPS64MOVVreg(v *Value) bool { v.AddArg(x) return true } - // match: (MOVVreg (MOVVconst [c])) + // match: (MOVVreg (MOVVconst [c])) // cond: // result: (MOVVconst [c]) for { @@ -4800,7 +4800,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value) bool { - // match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) + // match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) // result: (MOVWload [off1+off2] {sym} ptr mem) for { @@ -4970,7 +4970,7 @@ func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool { v.AddArg(x) return true } - // match: (MOVWreg (MOVVconst [c])) + // match: (MOVWreg (MOVVconst [c])) // cond: // result: (MOVVconst [int64(int32(c))]) for { @@ -5170,16 +5170,16 @@ func rewriteValueMIPS64_OpMIPS64NEGV(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64NOR(v *Value) bool { - // match: (NOR x (MOVVconst [c])) + // 
match: (NOR (MOVVconst [c]) x) // cond: is32Bit(c) // result: (NORconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPS64MOVVconst { + v_0 := v.Args[0] + if v_0.Op != OpMIPS64MOVVconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := v.Args[1] if !(is32Bit(c)) { break } @@ -5188,16 +5188,16 @@ func rewriteValueMIPS64_OpMIPS64NOR(v *Value) bool { v.AddArg(x) return true } - // match: (NOR (MOVVconst [c]) x) + // match: (NOR x (MOVVconst [c])) // cond: is32Bit(c) // result: (NORconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MOVVconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpMIPS64MOVVconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt if !(is32Bit(c)) { break } @@ -5226,16 +5226,16 @@ func rewriteValueMIPS64_OpMIPS64NORconst(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64OR(v *Value) bool { - // match: (OR x (MOVVconst [c])) + // match: (OR (MOVVconst [c]) x) // cond: is32Bit(c) // result: (ORconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPS64MOVVconst { + v_0 := v.Args[0] + if v_0.Op != OpMIPS64MOVVconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := v.Args[1] if !(is32Bit(c)) { break } @@ -5244,16 +5244,16 @@ func rewriteValueMIPS64_OpMIPS64OR(v *Value) bool { v.AddArg(x) return true } - // match: (OR (MOVVconst [c]) x) + // match: (OR x (MOVVconst [c])) // cond: is32Bit(c) // result: (ORconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MOVVconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpMIPS64MOVVconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt if !(is32Bit(c)) { break } @@ -5262,7 +5262,7 @@ func rewriteValueMIPS64_OpMIPS64OR(v *Value) bool { v.AddArg(x) return true } - // match: (OR x x) + // match: (OR x x) // cond: // result: x for { @@ -5278,7 +5278,7 @@ func rewriteValueMIPS64_OpMIPS64OR(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64ORconst(v *Value) bool { - // match: (ORconst [0] x) + // match: (ORconst [0] x) // cond: // result: x for { @@ -5291,7 +5291,7 @@ func rewriteValueMIPS64_OpMIPS64ORconst(v *Value) bool { v.AddArg(x) return true } - // match: (ORconst [-1] _) + // match: (ORconst [-1] _) // cond: // result: (MOVVconst [-1]) for { @@ -5338,7 +5338,7 @@ func rewriteValueMIPS64_OpMIPS64ORconst(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64SGT(v *Value) bool { - // match: (SGT (MOVVconst [c]) x) + // match: (SGT (MOVVconst [c]) x) // cond: is32Bit(c) // result: (SGTconst [c] x) for { @@ -5902,7 +5902,7 @@ func rewriteValueMIPS64_OpMIPS64SUBV(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64SUBVconst(v *Value) bool { - // match: (SUBVconst [0] x) + // match: (SUBVconst [0] x) // cond: // result: x for { @@ -5970,16 +5970,16 @@ func rewriteValueMIPS64_OpMIPS64SUBVconst(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64XOR(v *Value) bool { - // match: (XOR x (MOVVconst [c])) + // match: (XOR (MOVVconst [c]) x) // cond: is32Bit(c) // result: (XORconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpMIPS64MOVVconst { + v_0 := v.Args[0] + if v_0.Op != OpMIPS64MOVVconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := v.Args[1] if !(is32Bit(c)) { break } @@ -5988,16 +5988,16 @@ func rewriteValueMIPS64_OpMIPS64XOR(v *Value) bool { v.AddArg(x) return true } - // match: (XOR (MOVVconst [c]) x) + // match: (XOR x (MOVVconst [c])) // cond: is32Bit(c) // result: (XORconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MOVVconst { + 
x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpMIPS64MOVVconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt if !(is32Bit(c)) { break } @@ -6021,7 +6021,7 @@ func rewriteValueMIPS64_OpMIPS64XOR(v *Value) bool { return false } func rewriteValueMIPS64_OpMIPS64XORconst(v *Value) bool { - // match: (XORconst [0] x) + // match: (XORconst [0] x) // cond: // result: x for { @@ -7352,7 +7352,7 @@ func rewriteValueMIPS64_OpRsh16Ux8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh16Ux8 x y) + // match: (Rsh16Ux8 x y) // cond: // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y))) for { @@ -7484,7 +7484,7 @@ func rewriteValueMIPS64_OpRsh16x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh16x8 x y) + // match: (Rsh16x8 x y) // cond: // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (Const64 [63]))) (ZeroExt8to64 y))) for { @@ -7616,7 +7616,7 @@ func rewriteValueMIPS64_OpRsh32Ux8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh32Ux8 x y) + // match: (Rsh32Ux8 x y) // cond: // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y))) for { @@ -7748,7 +7748,7 @@ func rewriteValueMIPS64_OpRsh32x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh32x8 x y) + // match: (Rsh32x8 x y) // cond: // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (Const64 [63]))) (ZeroExt8to64 y))) for { @@ -7874,7 +7874,7 @@ func rewriteValueMIPS64_OpRsh64Ux8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh64Ux8 x y) + // match: (Rsh64Ux8 x y) // cond: // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SRLV x (ZeroExt8to64 y))) for { @@ -7998,7 +7998,7 @@ func rewriteValueMIPS64_OpRsh64x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh64x8 x y) + // match: (Rsh64x8 x y) // cond: // result: (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (Const64 [63]))) (ZeroExt8to64 y))) for { @@ -8128,7 +8128,7 @@ func rewriteValueMIPS64_OpRsh8Ux8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh8Ux8 x y) + // match: (Rsh8Ux8 x y) // cond: // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y))) for { @@ -8260,7 +8260,7 @@ func rewriteValueMIPS64_OpRsh8x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh8x8 x y) + // match: (Rsh8x8 x y) // cond: // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (Const64 [63]))) (ZeroExt8to64 y))) for { @@ -8331,7 +8331,7 @@ func rewriteValueMIPS64_OpSelect0(v *Value) bool { v.AddArg(x) return true } - // match: (Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) + // match: (Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) // cond: // result: (MOVVconst [int64(c)%int64(d)]) for { @@ -8398,26 +8398,6 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { v.AddArg(x) return true } - // match: (Select1 (MULVU (MOVVconst [-1]) x)) - // cond: - // result: (NEGV x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MULVU { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPS64MOVVconst { - break - } - if v_0_0.AuxInt != -1 { - break - } - x := v_0.Args[1] - v.reset(OpMIPS64NEGV) - v.AddArg(x) - return true - } // match: (Select1 (MULVU _ (MOVVconst [0]))) // cond: // result: (MOVVconst [0]) @@ -8437,25 +8417,6 @@ func rewriteValueMIPS64_OpSelect1(v *Value) 
bool { v.AuxInt = 0 return true } - // match: (Select1 (MULVU (MOVVconst [0]) _)) - // cond: - // result: (MOVVconst [0]) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MULVU { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPS64MOVVconst { - break - } - if v_0_0.AuxInt != 0 { - break - } - v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 - return true - } // match: (Select1 (MULVU x (MOVVconst [1]))) // cond: // result: x @@ -8477,27 +8438,6 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { v.AddArg(x) return true } - // match: (Select1 (MULVU (MOVVconst [1]) x)) - // cond: - // result: x - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MULVU { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPS64MOVVconst { - break - } - if v_0_0.AuxInt != 1 { - break - } - x := v_0.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (Select1 (MULVU x (MOVVconst [c]))) // cond: isPowerOfTwo(c) // result: (SLLVconst [log2(c)] x) @@ -8520,28 +8460,6 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { v.AddArg(x) return true } - // match: (Select1 (MULVU (MOVVconst [c]) x)) - // cond: isPowerOfTwo(c) - // result: (SLLVconst [log2(c)] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MULVU { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPS64MOVVconst { - break - } - c := v_0_0.AuxInt - x := v_0.Args[1] - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpMIPS64SLLVconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true - } // match: (Select1 (MULVU (MOVVconst [-1]) x)) // cond: // result: (NEGV x) @@ -8562,26 +8480,6 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { v.AddArg(x) return true } - // match: (Select1 (MULVU x (MOVVconst [-1]))) - // cond: - // result: (NEGV x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MULVU { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPS64MOVVconst { - break - } - if v_0_1.AuxInt != -1 { - break - } - v.reset(OpMIPS64NEGV) - v.AddArg(x) - return true - } // match: (Select1 (MULVU (MOVVconst [0]) _)) // cond: // result: (MOVVconst [0]) @@ -8601,25 +8499,6 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { v.AuxInt = 0 return true } - // match: (Select1 (MULVU _ (MOVVconst [0]))) - // cond: - // result: (MOVVconst [0]) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MULVU { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPS64MOVVconst { - break - } - if v_0_1.AuxInt != 0 { - break - } - v.reset(OpMIPS64MOVVconst) - v.AuxInt = 0 - return true - } // match: (Select1 (MULVU (MOVVconst [1]) x)) // cond: // result: x @@ -8641,27 +8520,6 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { v.AddArg(x) return true } - // match: (Select1 (MULVU x (MOVVconst [1]))) - // cond: - // result: x - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MULVU { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPS64MOVVconst { - break - } - if v_0_1.AuxInt != 1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (Select1 (MULVU (MOVVconst [c]) x)) // cond: isPowerOfTwo(c) // result: (SLLVconst [log2(c)] x) @@ -8684,28 +8542,6 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { v.AddArg(x) return true } - // match: (Select1 (MULVU x (MOVVconst [c]))) - // cond: isPowerOfTwo(c) - // result: (SLLVconst [log2(c)] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MULVU { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPS64MOVVconst { - break - } - c := v_0_1.AuxInt - if !(isPowerOfTwo(c)) { - 
break - } - v.reset(OpMIPS64SLLVconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true - } // match: (Select1 (DIVVU x (MOVVconst [1]))) // cond: // result: x @@ -8771,29 +8607,7 @@ func rewriteValueMIPS64_OpSelect1(v *Value) bool { v.AuxInt = c * d return true } - // match: (Select1 (MULVU (MOVVconst [d]) (MOVVconst [c]))) - // cond: - // result: (MOVVconst [c*d]) - for { - v_0 := v.Args[0] - if v_0.Op != OpMIPS64MULVU { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpMIPS64MOVVconst { - break - } - d := v_0_0.AuxInt - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpMIPS64MOVVconst { - break - } - c := v_0_1.AuxInt - v.reset(OpMIPS64MOVVconst) - v.AuxInt = c * d - return true - } - // match: (Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) + // match: (Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) // cond: // result: (MOVVconst [int64(c)/int64(d)]) for { @@ -10020,7 +9834,7 @@ func rewriteBlockMIPS64(b *Block) bool { _ = no return true } - // match: (EQ (MOVVconst [0]) yes no) + // match: (EQ (MOVVconst [0]) yes no) // cond: // result: (First nil yes no) for { @@ -10039,7 +9853,7 @@ func rewriteBlockMIPS64(b *Block) bool { _ = no return true } - // match: (EQ (MOVVconst [c]) yes no) + // match: (EQ (MOVVconst [c]) yes no) // cond: c != 0 // result: (First nil no yes) for { @@ -10459,7 +10273,7 @@ func rewriteBlockMIPS64(b *Block) bool { _ = no return true } - // match: (NE (MOVVconst [0]) yes no) + // match: (NE (MOVVconst [0]) yes no) // cond: // result: (First nil no yes) for { @@ -10479,7 +10293,7 @@ func rewriteBlockMIPS64(b *Block) bool { _ = yes return true } - // match: (NE (MOVVconst [c]) yes no) + // match: (NE (MOVVconst [c]) yes no) // cond: c != 0 // result: (First nil yes no) for { diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 703989611d..785fbd211f 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -598,7 +598,7 @@ func rewriteValuePPC64(v *Value) bool { return false } func rewriteValuePPC64_OpAdd16(v *Value) bool { - // match: (Add16 x y) + // match: (Add16 x y) // cond: // result: (ADD x y) for { @@ -611,7 +611,7 @@ func rewriteValuePPC64_OpAdd16(v *Value) bool { } } func rewriteValuePPC64_OpAdd32(v *Value) bool { - // match: (Add32 x y) + // match: (Add32 x y) // cond: // result: (ADD x y) for { @@ -637,7 +637,7 @@ func rewriteValuePPC64_OpAdd32F(v *Value) bool { } } func rewriteValuePPC64_OpAdd64(v *Value) bool { - // match: (Add64 x y) + // match: (Add64 x y) // cond: // result: (ADD x y) for { @@ -663,7 +663,7 @@ func rewriteValuePPC64_OpAdd64F(v *Value) bool { } } func rewriteValuePPC64_OpAdd8(v *Value) bool { - // match: (Add8 x y) + // match: (Add8 x y) // cond: // result: (ADD x y) for { @@ -741,7 +741,7 @@ func rewriteValuePPC64_OpAnd64(v *Value) bool { } } func rewriteValuePPC64_OpAnd8(v *Value) bool { - // match: (And8 x y) + // match: (And8 x y) // cond: // result: (AND x y) for { @@ -876,7 +876,7 @@ func rewriteValuePPC64_OpAtomicExchange64(v *Value) bool { } } func rewriteValuePPC64_OpAtomicLoad32(v *Value) bool { - // match: (AtomicLoad32 ptr mem) + // match: (AtomicLoad32 ptr mem) // cond: // result: (LoweredAtomicLoad32 ptr mem) for { @@ -889,7 +889,7 @@ func rewriteValuePPC64_OpAtomicLoad32(v *Value) bool { } } func rewriteValuePPC64_OpAtomicLoad64(v *Value) bool { - // match: (AtomicLoad64 ptr mem) + // match: (AtomicLoad64 ptr mem) // cond: // result: (LoweredAtomicLoad64 ptr mem) for { @@ -915,7 +915,7 @@ func 
rewriteValuePPC64_OpAtomicLoadPtr(v *Value) bool { } } func rewriteValuePPC64_OpAtomicOr8(v *Value) bool { - // match: (AtomicOr8 ptr val mem) + // match: (AtomicOr8 ptr val mem) // cond: // result: (LoweredAtomicOr8 ptr val mem) for { @@ -930,7 +930,7 @@ func rewriteValuePPC64_OpAtomicOr8(v *Value) bool { } } func rewriteValuePPC64_OpAtomicStore32(v *Value) bool { - // match: (AtomicStore32 ptr val mem) + // match: (AtomicStore32 ptr val mem) // cond: // result: (LoweredAtomicStore32 ptr val mem) for { @@ -945,7 +945,7 @@ func rewriteValuePPC64_OpAtomicStore32(v *Value) bool { } } func rewriteValuePPC64_OpAtomicStore64(v *Value) bool { - // match: (AtomicStore64 ptr val mem) + // match: (AtomicStore64 ptr val mem) // cond: // result: (LoweredAtomicStore64 ptr val mem) for { @@ -1035,7 +1035,7 @@ func rewriteValuePPC64_OpCom64(v *Value) bool { } } func rewriteValuePPC64_OpCom8(v *Value) bool { - // match: (Com8 x) + // match: (Com8 x) // cond: // result: (NOR x x) for { @@ -1047,7 +1047,7 @@ func rewriteValuePPC64_OpCom8(v *Value) bool { } } func rewriteValuePPC64_OpConst16(v *Value) bool { - // match: (Const16 [val]) + // match: (Const16 [val]) // cond: // result: (MOVDconst [val]) for { @@ -1058,7 +1058,7 @@ func rewriteValuePPC64_OpConst16(v *Value) bool { } } func rewriteValuePPC64_OpConst32(v *Value) bool { - // match: (Const32 [val]) + // match: (Const32 [val]) // cond: // result: (MOVDconst [val]) for { @@ -1080,7 +1080,7 @@ func rewriteValuePPC64_OpConst32F(v *Value) bool { } } func rewriteValuePPC64_OpConst64(v *Value) bool { - // match: (Const64 [val]) + // match: (Const64 [val]) // cond: // result: (MOVDconst [val]) for { @@ -1102,7 +1102,7 @@ func rewriteValuePPC64_OpConst64F(v *Value) bool { } } func rewriteValuePPC64_OpConst8(v *Value) bool { - // match: (Const8 [val]) + // match: (Const8 [val]) // cond: // result: (MOVDconst [val]) for { @@ -1320,7 +1320,7 @@ func rewriteValuePPC64_OpDiv16(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Div16 x y) + // match: (Div16 x y) // cond: // result: (DIVW (SignExt16to32 x) (SignExt16to32 y)) for { @@ -1358,7 +1358,7 @@ func rewriteValuePPC64_OpDiv16u(v *Value) bool { } } func rewriteValuePPC64_OpDiv32(v *Value) bool { - // match: (Div32 x y) + // match: (Div32 x y) // cond: // result: (DIVW x y) for { @@ -1397,7 +1397,7 @@ func rewriteValuePPC64_OpDiv32u(v *Value) bool { } } func rewriteValuePPC64_OpDiv64(v *Value) bool { - // match: (Div64 x y) + // match: (Div64 x y) // cond: // result: (DIVD x y) for { @@ -1440,7 +1440,7 @@ func rewriteValuePPC64_OpDiv8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Div8 x y) + // match: (Div8 x y) // cond: // result: (DIVW (SignExt8to32 x) (SignExt8to32 y)) for { @@ -1461,7 +1461,7 @@ func rewriteValuePPC64_OpDiv8u(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Div8u x y) + // match: (Div8u x y) // cond: // result: (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y)) for { @@ -2066,7 +2066,7 @@ func rewriteValuePPC64_OpGreater8U(v *Value) bool { } } func rewriteValuePPC64_OpHmul32(v *Value) bool { - // match: (Hmul32 x y) + // match: (Hmul32 x y) // cond: // result: (MULHW x y) for { @@ -2079,7 +2079,7 @@ func rewriteValuePPC64_OpHmul32(v *Value) bool { } } func rewriteValuePPC64_OpHmul32u(v *Value) bool { - // match: (Hmul32u x y) + // match: (Hmul32u x y) // cond: // result: (MULHWU x y) for { @@ -2092,7 +2092,7 @@ func rewriteValuePPC64_OpHmul32u(v *Value) bool { } } func rewriteValuePPC64_OpHmul64(v *Value) bool { - // 
match: (Hmul64 x y) + // match: (Hmul64 x y) // cond: // result: (MULHD x y) for { @@ -2105,7 +2105,7 @@ func rewriteValuePPC64_OpHmul64(v *Value) bool { } } func rewriteValuePPC64_OpHmul64u(v *Value) bool { - // match: (Hmul64u x y) + // match: (Hmul64u x y) // cond: // result: (MULHDU x y) for { @@ -2761,7 +2761,7 @@ func rewriteValuePPC64_OpLsh16x32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh16x32 x (Const64 [c])) + // match: (Lsh16x32 x (Const64 [c])) // cond: uint32(c) < 16 // result: (SLWconst x [c]) for { @@ -2779,7 +2779,7 @@ func rewriteValuePPC64_OpLsh16x32(v *Value) bool { v.AddArg(x) return true } - // match: (Lsh16x32 x (MOVDconst [c])) + // match: (Lsh16x32 x (MOVDconst [c])) // cond: uint32(c) < 16 // result: (SLWconst x [c]) for { @@ -2824,7 +2824,7 @@ func rewriteValuePPC64_OpLsh16x64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh16x64 x (Const64 [c])) + // match: (Lsh16x64 x (Const64 [c])) // cond: uint64(c) < 16 // result: (SLWconst x [c]) for { @@ -2842,7 +2842,7 @@ func rewriteValuePPC64_OpLsh16x64(v *Value) bool { v.AddArg(x) return true } - // match: (Lsh16x64 _ (Const64 [c])) + // match: (Lsh16x64 _ (Const64 [c])) // cond: uint64(c) >= 16 // result: (MOVDconst [0]) for { @@ -2858,7 +2858,7 @@ func rewriteValuePPC64_OpLsh16x64(v *Value) bool { v.AuxInt = 0 return true } - // match: (Lsh16x64 x (MOVDconst [c])) + // match: (Lsh16x64 x (MOVDconst [c])) // cond: uint64(c) < 16 // result: (SLWconst x [c]) for { @@ -2955,7 +2955,7 @@ func rewriteValuePPC64_OpLsh32x32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh32x32 x (Const64 [c])) + // match: (Lsh32x32 x (Const64 [c])) // cond: uint32(c) < 32 // result: (SLWconst x [c]) for { @@ -2973,7 +2973,7 @@ func rewriteValuePPC64_OpLsh32x32(v *Value) bool { v.AddArg(x) return true } - // match: (Lsh32x32 x (MOVDconst [c])) + // match: (Lsh32x32 x (MOVDconst [c])) // cond: uint32(c) < 32 // result: (SLWconst x [c]) for { @@ -3018,7 +3018,7 @@ func rewriteValuePPC64_OpLsh32x64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh32x64 x (Const64 [c])) + // match: (Lsh32x64 x (Const64 [c])) // cond: uint64(c) < 32 // result: (SLWconst x [c]) for { @@ -3036,7 +3036,7 @@ func rewriteValuePPC64_OpLsh32x64(v *Value) bool { v.AddArg(x) return true } - // match: (Lsh32x64 _ (Const64 [c])) + // match: (Lsh32x64 _ (Const64 [c])) // cond: uint64(c) >= 32 // result: (MOVDconst [0]) for { @@ -3052,7 +3052,7 @@ func rewriteValuePPC64_OpLsh32x64(v *Value) bool { v.AuxInt = 0 return true } - // match: (Lsh32x64 x (MOVDconst [c])) + // match: (Lsh32x64 x (MOVDconst [c])) // cond: uint64(c) < 32 // result: (SLWconst x [c]) for { @@ -3149,7 +3149,7 @@ func rewriteValuePPC64_OpLsh64x32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh64x32 x (Const64 [c])) + // match: (Lsh64x32 x (Const64 [c])) // cond: uint32(c) < 64 // result: (SLDconst x [c]) for { @@ -3167,7 +3167,7 @@ func rewriteValuePPC64_OpLsh64x32(v *Value) bool { v.AddArg(x) return true } - // match: (Lsh64x32 x (MOVDconst [c])) + // match: (Lsh64x32 x (MOVDconst [c])) // cond: uint32(c) < 64 // result: (SLDconst x [c]) for { @@ -3212,7 +3212,7 @@ func rewriteValuePPC64_OpLsh64x64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh64x64 x (Const64 [c])) + // match: (Lsh64x64 x (Const64 [c])) // cond: uint64(c) < 64 // result: (SLDconst x [c]) for { @@ -3230,7 +3230,7 @@ func rewriteValuePPC64_OpLsh64x64(v 
*Value) bool { v.AddArg(x) return true } - // match: (Lsh64x64 _ (Const64 [c])) + // match: (Lsh64x64 _ (Const64 [c])) // cond: uint64(c) >= 64 // result: (MOVDconst [0]) for { @@ -3246,7 +3246,7 @@ func rewriteValuePPC64_OpLsh64x64(v *Value) bool { v.AuxInt = 0 return true } - // match: (Lsh64x64 x (MOVDconst [c])) + // match: (Lsh64x64 x (MOVDconst [c])) // cond: uint64(c) < 64 // result: (SLDconst x [c]) for { @@ -3343,7 +3343,7 @@ func rewriteValuePPC64_OpLsh8x32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh8x32 x (Const64 [c])) + // match: (Lsh8x32 x (Const64 [c])) // cond: uint32(c) < 8 // result: (SLWconst x [c]) for { @@ -3361,7 +3361,7 @@ func rewriteValuePPC64_OpLsh8x32(v *Value) bool { v.AddArg(x) return true } - // match: (Lsh8x32 x (MOVDconst [c])) + // match: (Lsh8x32 x (MOVDconst [c])) // cond: uint32(c) < 8 // result: (SLWconst x [c]) for { @@ -3406,7 +3406,7 @@ func rewriteValuePPC64_OpLsh8x64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh8x64 x (Const64 [c])) + // match: (Lsh8x64 x (Const64 [c])) // cond: uint64(c) < 8 // result: (SLWconst x [c]) for { @@ -3424,7 +3424,7 @@ func rewriteValuePPC64_OpLsh8x64(v *Value) bool { v.AddArg(x) return true } - // match: (Lsh8x64 _ (Const64 [c])) + // match: (Lsh8x64 _ (Const64 [c])) // cond: uint64(c) >= 8 // result: (MOVDconst [0]) for { @@ -3440,7 +3440,7 @@ func rewriteValuePPC64_OpLsh8x64(v *Value) bool { v.AuxInt = 0 return true } - // match: (Lsh8x64 x (MOVDconst [c])) + // match: (Lsh8x64 x (MOVDconst [c])) // cond: uint64(c) < 8 // result: (SLWconst x [c]) for { @@ -4040,7 +4040,7 @@ func rewriteValuePPC64_OpMove(v *Value) bool { return false } func rewriteValuePPC64_OpMul16(v *Value) bool { - // match: (Mul16 x y) + // match: (Mul16 x y) // cond: // result: (MULLW x y) for { @@ -4053,7 +4053,7 @@ func rewriteValuePPC64_OpMul16(v *Value) bool { } } func rewriteValuePPC64_OpMul32(v *Value) bool { - // match: (Mul32 x y) + // match: (Mul32 x y) // cond: // result: (MULLW x y) for { @@ -4079,7 +4079,7 @@ func rewriteValuePPC64_OpMul32F(v *Value) bool { } } func rewriteValuePPC64_OpMul64(v *Value) bool { - // match: (Mul64 x y) + // match: (Mul64 x y) // cond: // result: (MULLD x y) for { @@ -4105,7 +4105,7 @@ func rewriteValuePPC64_OpMul64F(v *Value) bool { } } func rewriteValuePPC64_OpMul8(v *Value) bool { - // match: (Mul8 x y) + // match: (Mul8 x y) // cond: // result: (MULLW x y) for { @@ -4118,7 +4118,7 @@ func rewriteValuePPC64_OpMul8(v *Value) bool { } } func rewriteValuePPC64_OpNeg16(v *Value) bool { - // match: (Neg16 x) + // match: (Neg16 x) // cond: // result: (NEG x) for { @@ -4129,7 +4129,7 @@ func rewriteValuePPC64_OpNeg16(v *Value) bool { } } func rewriteValuePPC64_OpNeg32(v *Value) bool { - // match: (Neg32 x) + // match: (Neg32 x) // cond: // result: (NEG x) for { @@ -4151,7 +4151,7 @@ func rewriteValuePPC64_OpNeg32F(v *Value) bool { } } func rewriteValuePPC64_OpNeg64(v *Value) bool { - // match: (Neg64 x) + // match: (Neg64 x) // cond: // result: (NEG x) for { @@ -4173,7 +4173,7 @@ func rewriteValuePPC64_OpNeg64F(v *Value) bool { } } func rewriteValuePPC64_OpNeg8(v *Value) bool { - // match: (Neg8 x) + // match: (Neg8 x) // cond: // result: (NEG x) for { @@ -4451,7 +4451,7 @@ func rewriteValuePPC64_OpOr64(v *Value) bool { } } func rewriteValuePPC64_OpOr8(v *Value) bool { - // match: (Or8 x y) + // match: (Or8 x y) // cond: // result: (OR x y) for { @@ -4477,16 +4477,16 @@ func rewriteValuePPC64_OpOrB(v *Value) bool { } } func 
rewriteValuePPC64_OpPPC64ADD(v *Value) bool { - // match: (ADD x (MOVDconst [c])) + // match: (ADD (MOVDconst [c]) x) // cond: is32Bit(c) // result: (ADDconst [c] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVDconst { + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVDconst { break } - c := v_1.AuxInt + c := v_0.AuxInt + x := v.Args[1] if !(is32Bit(c)) { break } @@ -4495,16 +4495,16 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { v.AddArg(x) return true } - // match: (ADD (MOVDconst [c]) x) + // match: (ADD x (MOVDconst [c])) // cond: is32Bit(c) // result: (ADDconst [c] x) for { - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVDconst { break } - c := v_0.AuxInt - x := v.Args[1] + c := v_1.AuxInt if !(is32Bit(c)) { break } @@ -4587,24 +4587,6 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { v.AddArg(y) return true } - // match: (AND (NOR y y) x) - // cond: - // result: (ANDN x y) - for { - v_0 := v.Args[0] - if v_0.Op != OpPPC64NOR { - break - } - y := v_0.Args[0] - if y != v_0.Args[1] { - break - } - x := v.Args[1] - v.reset(OpPPC64ANDN) - v.AddArg(x) - v.AddArg(y) - return true - } // match: (AND (MOVDconst [c]) (MOVDconst [d])) // cond: // result: (MOVDconst [c&d]) @@ -4623,24 +4605,6 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { v.AuxInt = c & d return true } - // match: (AND (MOVDconst [d]) (MOVDconst [c])) - // cond: - // result: (MOVDconst [c&d]) - for { - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpPPC64MOVDconst) - v.AuxInt = c & d - return true - } // match: (AND x (MOVDconst [c])) // cond: isU16Bit(c) // result: (ANDconst [c] x) @@ -4713,42 +4677,6 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { v.AddArg(x) return true } - // match: (AND x:(MOVBZload _ _) (MOVDconst [c])) - // cond: - // result: (ANDconst [c&0xFF] x) - for { - x := v.Args[0] - if x.Op != OpPPC64MOVBZload { - break - } - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpPPC64ANDconst) - v.AuxInt = c & 0xFF - v.AddArg(x) - return true - } - // match: (AND (MOVDconst [c]) x:(MOVBZload _ _)) - // cond: - // result: (ANDconst [c&0xFF] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if x.Op != OpPPC64MOVBZload { - break - } - v.reset(OpPPC64ANDconst) - v.AuxInt = c & 0xFF - v.AddArg(x) - return true - } return false } func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool { @@ -5379,23 +5307,6 @@ func rewriteValuePPC64_OpPPC64Equal(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64FADD(v *Value) bool { - // match: (FADD (FMUL x y) z) - // cond: - // result: (FMADD x y z) - for { - v_0 := v.Args[0] - if v_0.Op != OpPPC64FMUL { - break - } - x := v_0.Args[0] - y := v_0.Args[1] - z := v.Args[1] - v.reset(OpPPC64FMADD) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } // match: (FADD z (FMUL x y)) // cond: // result: (FMADD x y z) @@ -5413,26 +5324,26 @@ func rewriteValuePPC64_OpPPC64FADD(v *Value) bool { v.AddArg(z) return true } - return false -} -func rewriteValuePPC64_OpPPC64FADDS(v *Value) bool { - // match: (FADDS (FMULS x y) z) + // match: (FADD (FMUL x y) z) // cond: - // result: (FMADDS x y z) + // result: (FMADD x y z) for { v_0 := v.Args[0] - if v_0.Op != OpPPC64FMULS { + if v_0.Op != OpPPC64FMUL { break } x := v_0.Args[0] y := v_0.Args[1] z := 
v.Args[1] - v.reset(OpPPC64FMADDS) + v.reset(OpPPC64FMADD) v.AddArg(x) v.AddArg(y) v.AddArg(z) return true } + return false +} +func rewriteValuePPC64_OpPPC64FADDS(v *Value) bool { // match: (FADDS z (FMULS x y)) // cond: // result: (FMADDS x y z) @@ -5450,6 +5361,23 @@ func rewriteValuePPC64_OpPPC64FADDS(v *Value) bool { v.AddArg(z) return true } + // match: (FADDS (FMULS x y) z) + // cond: + // result: (FMADDS x y z) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64FMULS { + break + } + x := v_0.Args[0] + y := v_0.Args[1] + z := v.Args[1] + v.reset(OpPPC64FMADDS) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } return false } func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool { @@ -6963,27 +6891,6 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { v.AddArg(y) return true } - // match: (MOVWZreg y:(AND _ (MOVDconst [c]))) - // cond: uint64(c) <= 0xFFFFFFFF - // result: y - for { - y := v.Args[0] - if y.Op != OpPPC64AND { - break - } - y_1 := y.Args[1] - if y_1.Op != OpPPC64MOVDconst { - break - } - c := y_1.AuxInt - if !(uint64(c) <= 0xFFFFFFFF) { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } // match: (MOVWZreg y:(MOVWZreg _)) // cond: // result: y @@ -7127,27 +7034,6 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { v.AddArg(y) return true } - // match: (MOVWreg y:(AND _ (MOVDconst [c]))) - // cond: uint64(c) <= 0x7FFFFFFF - // result: y - for { - y := v.Args[0] - if y.Op != OpPPC64AND { - break - } - y_1 := y.Args[1] - if y_1.Op != OpPPC64MOVDconst { - break - } - c := y_1.AuxInt - if !(uint64(c) <= 0x7FFFFFFF) { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } // match: (MOVWreg y:(MOVWreg _)) // cond: // result: y @@ -7466,24 +7352,6 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v.AuxInt = c | d return true } - // match: (OR (MOVDconst [d]) (MOVDconst [c])) - // cond: - // result: (MOVDconst [c|d]) - for { - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpPPC64MOVDconst) - v.AuxInt = c | d - return true - } // match: (OR x (MOVDconst [c])) // cond: isU32Bit(c) // result: (ORconst [c] x) @@ -7625,24 +7493,6 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { v.AuxInt = c ^ d return true } - // match: (XOR (MOVDconst [d]) (MOVDconst [c])) - // cond: - // result: (MOVDconst [c^d]) - for { - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpPPC64MOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpPPC64MOVDconst) - v.AuxInt = c ^ d - return true - } // match: (XOR x (MOVDconst [c])) // cond: isU32Bit(c) // result: (XORconst [c] x) @@ -7979,7 +7829,7 @@ func rewriteValuePPC64_OpRsh16x32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh16x32 x (Const64 [c])) + // match: (Rsh16x32 x (Const64 [c])) // cond: uint32(c) < 16 // result: (SRAWconst (SignExt16to32 x) [c]) for { @@ -7999,7 +7849,7 @@ func rewriteValuePPC64_OpRsh16x32(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh16x32 x (MOVDconst [c])) + // match: (Rsh16x32 x (MOVDconst [c])) // cond: uint32(c) < 16 // result: (SRAWconst (SignExt16to32 x) [c]) for { @@ -8048,7 +7898,7 @@ func rewriteValuePPC64_OpRsh16x64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh16x64 x (Const64 [c])) + // match: (Rsh16x64 x (Const64 [c])) // cond: uint64(c) < 16 // result: (SRAWconst 
(SignExt16to32 x) [c]) for { @@ -8088,7 +7938,7 @@ func rewriteValuePPC64_OpRsh16x64(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh16x64 x (MOVDconst [c])) + // match: (Rsh16x64 x (MOVDconst [c])) // cond: uint64(c) < 16 // result: (SRAWconst (SignExt16to32 x) [c]) for { @@ -8385,7 +8235,7 @@ func rewriteValuePPC64_OpRsh32x32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh32x32 x (Const64 [c])) + // match: (Rsh32x32 x (Const64 [c])) // cond: uint32(c) < 32 // result: (SRAWconst x [c]) for { @@ -8403,7 +8253,7 @@ func rewriteValuePPC64_OpRsh32x32(v *Value) bool { v.AddArg(x) return true } - // match: (Rsh32x32 x (MOVDconst [c])) + // match: (Rsh32x32 x (MOVDconst [c])) // cond: uint32(c) < 32 // result: (SRAWconst x [c]) for { @@ -8448,7 +8298,7 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh32x64 x (Const64 [c])) + // match: (Rsh32x64 x (Const64 [c])) // cond: uint64(c) < 32 // result: (SRAWconst x [c]) for { @@ -8484,7 +8334,7 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { v.AddArg(x) return true } - // match: (Rsh32x64 x (MOVDconst [c])) + // match: (Rsh32x64 x (MOVDconst [c])) // cond: uint64(c) < 32 // result: (SRAWconst x [c]) for { @@ -8775,7 +8625,7 @@ func rewriteValuePPC64_OpRsh64x32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh64x32 x (Const64 [c])) + // match: (Rsh64x32 x (Const64 [c])) // cond: uint32(c) < 64 // result: (SRADconst x [c]) for { @@ -8793,7 +8643,7 @@ func rewriteValuePPC64_OpRsh64x32(v *Value) bool { v.AddArg(x) return true } - // match: (Rsh64x32 x (MOVDconst [c])) + // match: (Rsh64x32 x (MOVDconst [c])) // cond: uint32(c) < 64 // result: (SRADconst x [c]) for { @@ -8838,7 +8688,7 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh64x64 x (Const64 [c])) + // match: (Rsh64x64 x (Const64 [c])) // cond: uint64(c) < 64 // result: (SRADconst x [c]) for { @@ -8874,7 +8724,7 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { v.AddArg(x) return true } - // match: (Rsh64x64 x (MOVDconst [c])) + // match: (Rsh64x64 x (MOVDconst [c])) // cond: uint64(c) < 64 // result: (SRADconst x [c]) for { @@ -8973,7 +8823,7 @@ func rewriteValuePPC64_OpRsh8Ux32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh8Ux32 x (Const64 [c])) + // match: (Rsh8Ux32 x (Const64 [c])) // cond: uint32(c) < 8 // result: (SRWconst (ZeroExt8to32 x) [c]) for { @@ -8993,7 +8843,7 @@ func rewriteValuePPC64_OpRsh8Ux32(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh8Ux32 x (MOVDconst [c])) + // match: (Rsh8Ux32 x (MOVDconst [c])) // cond: uint32(c) < 8 // result: (SRWconst (ZeroExt8to32 x) [c]) for { @@ -9042,7 +8892,7 @@ func rewriteValuePPC64_OpRsh8Ux64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh8Ux64 x (Const64 [c])) + // match: (Rsh8Ux64 x (Const64 [c])) // cond: uint64(c) < 8 // result: (SRWconst (ZeroExt8to32 x) [c]) for { @@ -9062,7 +8912,7 @@ func rewriteValuePPC64_OpRsh8Ux64(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh8Ux64 _ (Const64 [c])) + // match: (Rsh8Ux64 _ (Const64 [c])) // cond: uint64(c) >= 8 // result: (MOVDconst [0]) for { @@ -9078,7 +8928,7 @@ func rewriteValuePPC64_OpRsh8Ux64(v *Value) bool { v.AuxInt = 0 return true } - // match: (Rsh8Ux64 x (MOVDconst [c])) + // match: (Rsh8Ux64 x (MOVDconst [c])) // cond: uint64(c) < 8 // result: (SRWconst (ZeroExt8to32 x) [c]) for { @@ 
-9183,7 +9033,7 @@ func rewriteValuePPC64_OpRsh8x32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh8x32 x (Const64 [c])) + // match: (Rsh8x32 x (Const64 [c])) // cond: uint32(c) < 8 // result: (SRAWconst (SignExt8to32 x) [c]) for { @@ -9203,7 +9053,7 @@ func rewriteValuePPC64_OpRsh8x32(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh8x32 x (MOVDconst [c])) + // match: (Rsh8x32 x (MOVDconst [c])) // cond: uint32(c) < 8 // result: (SRAWconst (SignExt8to32 x) [c]) for { @@ -9252,7 +9102,7 @@ func rewriteValuePPC64_OpRsh8x64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh8x64 x (Const64 [c])) + // match: (Rsh8x64 x (Const64 [c])) // cond: uint64(c) < 8 // result: (SRAWconst (SignExt8to32 x) [c]) for { @@ -9272,7 +9122,7 @@ func rewriteValuePPC64_OpRsh8x64(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh8x64 x (Const64 [c])) + // match: (Rsh8x64 x (Const64 [c])) // cond: uint64(c) >= 8 // result: (SRAWconst (SignExt8to32 x) [63]) for { @@ -9292,7 +9142,7 @@ func rewriteValuePPC64_OpRsh8x64(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh8x64 x (MOVDconst [c])) + // match: (Rsh8x64 x (MOVDconst [c])) // cond: uint64(c) < 8 // result: (SRAWconst (SignExt8to32 x) [c]) for { @@ -9397,7 +9247,7 @@ func rewriteValuePPC64_OpSignExt32to64(v *Value) bool { } } func rewriteValuePPC64_OpSignExt8to16(v *Value) bool { - // match: (SignExt8to16 x) + // match: (SignExt8to16 x) // cond: // result: (MOVBreg x) for { @@ -9408,7 +9258,7 @@ func rewriteValuePPC64_OpSignExt8to16(v *Value) bool { } } func rewriteValuePPC64_OpSignExt8to32(v *Value) bool { - // match: (SignExt8to32 x) + // match: (SignExt8to32 x) // cond: // result: (MOVBreg x) for { @@ -9419,7 +9269,7 @@ func rewriteValuePPC64_OpSignExt8to32(v *Value) bool { } } func rewriteValuePPC64_OpSignExt8to64(v *Value) bool { - // match: (SignExt8to64 x) + // match: (SignExt8to64 x) // cond: // result: (MOVBreg x) for { @@ -9595,7 +9445,7 @@ func rewriteValuePPC64_OpStore(v *Value) bool { return false } func rewriteValuePPC64_OpSub16(v *Value) bool { - // match: (Sub16 x y) + // match: (Sub16 x y) // cond: // result: (SUB x y) for { @@ -9608,7 +9458,7 @@ func rewriteValuePPC64_OpSub16(v *Value) bool { } } func rewriteValuePPC64_OpSub32(v *Value) bool { - // match: (Sub32 x y) + // match: (Sub32 x y) // cond: // result: (SUB x y) for { @@ -9634,7 +9484,7 @@ func rewriteValuePPC64_OpSub32F(v *Value) bool { } } func rewriteValuePPC64_OpSub64(v *Value) bool { - // match: (Sub64 x y) + // match: (Sub64 x y) // cond: // result: (SUB x y) for { @@ -9660,7 +9510,7 @@ func rewriteValuePPC64_OpSub64F(v *Value) bool { } } func rewriteValuePPC64_OpSub8(v *Value) bool { - // match: (Sub8 x y) + // match: (Sub8 x y) // cond: // result: (SUB x y) for { @@ -9686,7 +9536,7 @@ func rewriteValuePPC64_OpSubPtr(v *Value) bool { } } func rewriteValuePPC64_OpTrunc16to8(v *Value) bool { - // match: (Trunc16to8 x) + // match: (Trunc16to8 x) // cond: // result: (MOVBreg x) for { @@ -9708,7 +9558,7 @@ func rewriteValuePPC64_OpTrunc32to16(v *Value) bool { } } func rewriteValuePPC64_OpTrunc32to8(v *Value) bool { - // match: (Trunc32to8 x) + // match: (Trunc32to8 x) // cond: // result: (MOVBreg x) for { @@ -9741,7 +9591,7 @@ func rewriteValuePPC64_OpTrunc64to32(v *Value) bool { } } func rewriteValuePPC64_OpTrunc64to8(v *Value) bool { - // match: (Trunc64to8 x) + // match: (Trunc64to8 x) // cond: // result: (MOVBreg x) for { @@ -9791,7 +9641,7 @@ func rewriteValuePPC64_OpXor64(v *Value) bool 
{ } } func rewriteValuePPC64_OpXor8(v *Value) bool { - // match: (Xor8 x y) + // match: (Xor8 x y) // cond: // result: (XOR x y) for { @@ -10192,7 +10042,7 @@ func rewriteValuePPC64_OpZeroExt32to64(v *Value) bool { } } func rewriteValuePPC64_OpZeroExt8to16(v *Value) bool { - // match: (ZeroExt8to16 x) + // match: (ZeroExt8to16 x) // cond: // result: (MOVBZreg x) for { @@ -10203,7 +10053,7 @@ func rewriteValuePPC64_OpZeroExt8to16(v *Value) bool { } } func rewriteValuePPC64_OpZeroExt8to32(v *Value) bool { - // match: (ZeroExt8to32 x) + // match: (ZeroExt8to32 x) // cond: // result: (MOVBZreg x) for { @@ -10214,7 +10064,7 @@ func rewriteValuePPC64_OpZeroExt8to32(v *Value) bool { } } func rewriteValuePPC64_OpZeroExt8to64(v *Value) bool { - // match: (ZeroExt8to64 x) + // match: (ZeroExt8to64 x) // cond: // result: (MOVBZreg x) for { diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index 72caf9405d..6740fe4cad 100644 --- a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -716,7 +716,7 @@ func rewriteValueS390X(v *Value) bool { return false } func rewriteValueS390X_OpAdd16(v *Value) bool { - // match: (Add16 x y) + // match: (Add16 x y) // cond: // result: (ADDW x y) for { @@ -729,7 +729,7 @@ func rewriteValueS390X_OpAdd16(v *Value) bool { } } func rewriteValueS390X_OpAdd32(v *Value) bool { - // match: (Add32 x y) + // match: (Add32 x y) // cond: // result: (ADDW x y) for { @@ -755,7 +755,7 @@ func rewriteValueS390X_OpAdd32F(v *Value) bool { } } func rewriteValueS390X_OpAdd64(v *Value) bool { - // match: (Add64 x y) + // match: (Add64 x y) // cond: // result: (ADD x y) for { @@ -781,7 +781,7 @@ func rewriteValueS390X_OpAdd64F(v *Value) bool { } } func rewriteValueS390X_OpAdd8(v *Value) bool { - // match: (Add8 x y) + // match: (Add8 x y) // cond: // result: (ADDW x y) for { @@ -859,7 +859,7 @@ func rewriteValueS390X_OpAnd64(v *Value) bool { } } func rewriteValueS390X_OpAnd8(v *Value) bool { - // match: (And8 x y) + // match: (And8 x y) // cond: // result: (ANDW x y) for { @@ -1191,7 +1191,7 @@ func rewriteValueS390X_OpCom64(v *Value) bool { } } func rewriteValueS390X_OpCom8(v *Value) bool { - // match: (Com8 x) + // match: (Com8 x) // cond: // result: (NOTW x) for { @@ -1202,7 +1202,7 @@ func rewriteValueS390X_OpCom8(v *Value) bool { } } func rewriteValueS390X_OpConst16(v *Value) bool { - // match: (Const16 [val]) + // match: (Const16 [val]) // cond: // result: (MOVDconst [val]) for { @@ -1213,7 +1213,7 @@ func rewriteValueS390X_OpConst16(v *Value) bool { } } func rewriteValueS390X_OpConst32(v *Value) bool { - // match: (Const32 [val]) + // match: (Const32 [val]) // cond: // result: (MOVDconst [val]) for { @@ -1235,7 +1235,7 @@ func rewriteValueS390X_OpConst32F(v *Value) bool { } } func rewriteValueS390X_OpConst64(v *Value) bool { - // match: (Const64 [val]) + // match: (Const64 [val]) // cond: // result: (MOVDconst [val]) for { @@ -1257,7 +1257,7 @@ func rewriteValueS390X_OpConst64F(v *Value) bool { } } func rewriteValueS390X_OpConst8(v *Value) bool { - // match: (Const8 [val]) + // match: (Const8 [val]) // cond: // result: (MOVDconst [val]) for { @@ -1478,7 +1478,7 @@ func rewriteValueS390X_OpDiv16(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Div16 x y) + // match: (Div16 x y) // cond: // result: (DIVW (MOVHreg x) (MOVHreg y)) for { @@ -1520,7 +1520,7 @@ func rewriteValueS390X_OpDiv32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: 
(Div32 x y) + // match: (Div32 x y) // cond: // result: (DIVW (MOVWreg x) y) for { @@ -1567,7 +1567,7 @@ func rewriteValueS390X_OpDiv32u(v *Value) bool { } } func rewriteValueS390X_OpDiv64(v *Value) bool { - // match: (Div64 x y) + // match: (Div64 x y) // cond: // result: (DIVD x y) for { @@ -1610,7 +1610,7 @@ func rewriteValueS390X_OpDiv8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Div8 x y) + // match: (Div8 x y) // cond: // result: (DIVW (MOVBreg x) (MOVBreg y)) for { @@ -1631,7 +1631,7 @@ func rewriteValueS390X_OpDiv8u(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Div8u x y) + // match: (Div8u x y) // cond: // result: (DIVWU (MOVBZreg x) (MOVBZreg y)) for { @@ -1652,7 +1652,7 @@ func rewriteValueS390X_OpEq16(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Eq16 x y) + // match: (Eq16 x y) // cond: // result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y))) for { @@ -1681,7 +1681,7 @@ func rewriteValueS390X_OpEq32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Eq32 x y) + // match: (Eq32 x y) // cond: // result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) for { @@ -1731,7 +1731,7 @@ func rewriteValueS390X_OpEq64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Eq64 x y) + // match: (Eq64 x y) // cond: // result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) for { @@ -1781,7 +1781,7 @@ func rewriteValueS390X_OpEq8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Eq8 x y) + // match: (Eq8 x y) // cond: // result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) for { @@ -1810,7 +1810,7 @@ func rewriteValueS390X_OpEqB(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (EqB x y) + // match: (EqB x y) // cond: // result: (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) for { @@ -1864,7 +1864,7 @@ func rewriteValueS390X_OpGeq16(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Geq16 x y) + // match: (Geq16 x y) // cond: // result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y))) for { @@ -1922,7 +1922,7 @@ func rewriteValueS390X_OpGeq32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Geq32 x y) + // match: (Geq32 x y) // cond: // result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) for { @@ -1997,7 +1997,7 @@ func rewriteValueS390X_OpGeq64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Geq64 x y) + // match: (Geq64 x y) // cond: // result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) for { @@ -2072,7 +2072,7 @@ func rewriteValueS390X_OpGeq8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Geq8 x y) + // match: (Geq8 x y) // cond: // result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) for { @@ -2101,7 +2101,7 @@ func rewriteValueS390X_OpGeq8U(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Geq8U x y) + // match: (Geq8U x y) // cond: // result: (MOVDGE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y))) for { @@ -2150,7 +2150,7 @@ func rewriteValueS390X_OpGreater16(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Greater16 x y) + // match: (Greater16 x y) // cond: // result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y))) for { @@ -2208,7 +2208,7 @@ func 
rewriteValueS390X_OpGreater32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Greater32 x y) + // match: (Greater32 x y) // cond: // result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) for { @@ -2283,7 +2283,7 @@ func rewriteValueS390X_OpGreater64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Greater64 x y) + // match: (Greater64 x y) // cond: // result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) for { @@ -2358,7 +2358,7 @@ func rewriteValueS390X_OpGreater8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Greater8 x y) + // match: (Greater8 x y) // cond: // result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) for { @@ -2387,7 +2387,7 @@ func rewriteValueS390X_OpGreater8U(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Greater8U x y) + // match: (Greater8U x y) // cond: // result: (MOVDGT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y))) for { @@ -2416,7 +2416,7 @@ func rewriteValueS390X_OpHmul32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Hmul32 x y) + // match: (Hmul32 x y) // cond: // result: (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y))) for { @@ -2460,7 +2460,7 @@ func rewriteValueS390X_OpHmul32u(v *Value) bool { } } func rewriteValueS390X_OpHmul64(v *Value) bool { - // match: (Hmul64 x y) + // match: (Hmul64 x y) // cond: // result: (MULHD x y) for { @@ -2597,7 +2597,7 @@ func rewriteValueS390X_OpLeq16(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Leq16 x y) + // match: (Leq16 x y) // cond: // result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y))) for { @@ -2655,7 +2655,7 @@ func rewriteValueS390X_OpLeq32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Leq32 x y) + // match: (Leq32 x y) // cond: // result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) for { @@ -2730,7 +2730,7 @@ func rewriteValueS390X_OpLeq64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Leq64 x y) + // match: (Leq64 x y) // cond: // result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) for { @@ -2805,7 +2805,7 @@ func rewriteValueS390X_OpLeq8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Leq8 x y) + // match: (Leq8 x y) // cond: // result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) for { @@ -2834,7 +2834,7 @@ func rewriteValueS390X_OpLeq8U(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Leq8U x y) + // match: (Leq8U x y) // cond: // result: (MOVDLE (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y))) for { @@ -2863,7 +2863,7 @@ func rewriteValueS390X_OpLess16(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Less16 x y) + // match: (Less16 x y) // cond: // result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y))) for { @@ -2921,7 +2921,7 @@ func rewriteValueS390X_OpLess32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Less32 x y) + // match: (Less32 x y) // cond: // result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) for { @@ -2996,7 +2996,7 @@ func rewriteValueS390X_OpLess64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Less64 x y) + // match: (Less64 x y) // cond: // result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) for { @@ -3071,7 +3071,7 @@ func rewriteValueS390X_OpLess8(v 
*Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Less8 x y) + // match: (Less8 x y) // cond: // result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) for { @@ -3100,7 +3100,7 @@ func rewriteValueS390X_OpLess8U(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Less8U x y) + // match: (Less8U x y) // cond: // result: (MOVDLT (MOVDconst [0]) (MOVDconst [1]) (CMPU (MOVBZreg x) (MOVBZreg y))) for { @@ -3343,7 +3343,7 @@ func rewriteValueS390X_OpLsh16x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh16x8 x y) + // match: (Lsh16x8 x y) // cond: // result: (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [31]))) for { @@ -3447,7 +3447,7 @@ func rewriteValueS390X_OpLsh32x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh32x8 x y) + // match: (Lsh32x8 x y) // cond: // result: (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [31]))) for { @@ -3551,7 +3551,7 @@ func rewriteValueS390X_OpLsh64x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh64x8 x y) + // match: (Lsh64x8 x y) // cond: // result: (AND (SLD x y) (SUBEcarrymask (CMPWUconst (MOVBZreg y) [63]))) for { @@ -3655,7 +3655,7 @@ func rewriteValueS390X_OpLsh8x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh8x8 x y) + // match: (Lsh8x8 x y) // cond: // result: (ANDW (SLW x y) (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [31]))) for { @@ -3683,7 +3683,7 @@ func rewriteValueS390X_OpMod16(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Mod16 x y) + // match: (Mod16 x y) // cond: // result: (MODW (MOVHreg x) (MOVHreg y)) for { @@ -3725,7 +3725,7 @@ func rewriteValueS390X_OpMod32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Mod32 x y) + // match: (Mod32 x y) // cond: // result: (MODW (MOVWreg x) y) for { @@ -3759,7 +3759,7 @@ func rewriteValueS390X_OpMod32u(v *Value) bool { } } func rewriteValueS390X_OpMod64(v *Value) bool { - // match: (Mod64 x y) + // match: (Mod64 x y) // cond: // result: (MODD x y) for { @@ -3789,7 +3789,7 @@ func rewriteValueS390X_OpMod8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Mod8 x y) + // match: (Mod8 x y) // cond: // result: (MODW (MOVBreg x) (MOVBreg y)) for { @@ -3810,7 +3810,7 @@ func rewriteValueS390X_OpMod8u(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Mod8u x y) + // match: (Mod8u x y) // cond: // result: (MODWU (MOVBZreg x) (MOVBZreg y)) for { @@ -4233,7 +4233,7 @@ func rewriteValueS390X_OpMove(v *Value) bool { return false } func rewriteValueS390X_OpMul16(v *Value) bool { - // match: (Mul16 x y) + // match: (Mul16 x y) // cond: // result: (MULLW x y) for { @@ -4246,7 +4246,7 @@ func rewriteValueS390X_OpMul16(v *Value) bool { } } func rewriteValueS390X_OpMul32(v *Value) bool { - // match: (Mul32 x y) + // match: (Mul32 x y) // cond: // result: (MULLW x y) for { @@ -4272,7 +4272,7 @@ func rewriteValueS390X_OpMul32F(v *Value) bool { } } func rewriteValueS390X_OpMul64(v *Value) bool { - // match: (Mul64 x y) + // match: (Mul64 x y) // cond: // result: (MULLD x y) for { @@ -4298,7 +4298,7 @@ func rewriteValueS390X_OpMul64F(v *Value) bool { } } func rewriteValueS390X_OpMul8(v *Value) bool { - // match: (Mul8 x y) + // match: (Mul8 x y) // cond: // result: (MULLW x y) for { @@ -4315,7 +4315,7 @@ func rewriteValueS390X_OpNeg16(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - 
// match: (Neg16 x) + // match: (Neg16 x) // cond: // result: (NEGW (MOVHreg x)) for { @@ -4328,7 +4328,7 @@ func rewriteValueS390X_OpNeg16(v *Value) bool { } } func rewriteValueS390X_OpNeg32(v *Value) bool { - // match: (Neg32 x) + // match: (Neg32 x) // cond: // result: (NEGW x) for { @@ -4350,7 +4350,7 @@ func rewriteValueS390X_OpNeg32F(v *Value) bool { } } func rewriteValueS390X_OpNeg64(v *Value) bool { - // match: (Neg64 x) + // match: (Neg64 x) // cond: // result: (NEG x) for { @@ -4376,7 +4376,7 @@ func rewriteValueS390X_OpNeg8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Neg8 x) + // match: (Neg8 x) // cond: // result: (NEGW (MOVBreg x)) for { @@ -4393,7 +4393,7 @@ func rewriteValueS390X_OpNeq16(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Neq16 x y) + // match: (Neq16 x y) // cond: // result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVHreg x) (MOVHreg y))) for { @@ -4422,7 +4422,7 @@ func rewriteValueS390X_OpNeq32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Neq32 x y) + // match: (Neq32 x y) // cond: // result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) for { @@ -4472,7 +4472,7 @@ func rewriteValueS390X_OpNeq64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Neq64 x y) + // match: (Neq64 x y) // cond: // result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) for { @@ -4522,7 +4522,7 @@ func rewriteValueS390X_OpNeq8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Neq8 x y) + // match: (Neq8 x y) // cond: // result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) for { @@ -4551,7 +4551,7 @@ func rewriteValueS390X_OpNeqB(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (NeqB x y) + // match: (NeqB x y) // cond: // result: (MOVDNE (MOVDconst [0]) (MOVDconst [1]) (CMP (MOVBreg x) (MOVBreg y))) for { @@ -4712,7 +4712,7 @@ func rewriteValueS390X_OpOr64(v *Value) bool { } } func rewriteValueS390X_OpOr8(v *Value) bool { - // match: (Or8 x y) + // match: (Or8 x y) // cond: // result: (ORW x y) for { @@ -4850,7 +4850,7 @@ func rewriteValueS390X_OpRsh16Ux8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh16Ux8 x y) + // match: (Rsh16Ux8 x y) // cond: // result: (ANDW (SRW (MOVHZreg x) y) (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [15]))) for { @@ -4975,7 +4975,7 @@ func rewriteValueS390X_OpRsh16x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh16x8 x y) + // match: (Rsh16x8 x y) // cond: // result: (SRAW (MOVHreg x) (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [15]))))) for { @@ -5084,7 +5084,7 @@ func rewriteValueS390X_OpRsh32Ux8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh32Ux8 x y) + // match: (Rsh32Ux8 x y) // cond: // result: (ANDW (SRW x y) (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [31]))) for { @@ -5197,7 +5197,7 @@ func rewriteValueS390X_OpRsh32x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh32x8 x y) + // match: (Rsh32x8 x y) // cond: // result: (SRAW x (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [31]))))) for { @@ -5304,7 +5304,7 @@ func rewriteValueS390X_OpRsh64Ux8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh64Ux8 x y) + // match: (Rsh64Ux8 x y) // cond: // result: (AND (SRD x y) (SUBEcarrymask (CMPWUconst (MOVBZreg y) [63]))) for { @@ -5417,7 +5417,7 @@ func rewriteValueS390X_OpRsh64x8(v 
*Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh64x8 x y) + // match: (Rsh64x8 x y) // cond: // result: (SRAD x (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [63]))))) for { @@ -5534,7 +5534,7 @@ func rewriteValueS390X_OpRsh8Ux8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh8Ux8 x y) + // match: (Rsh8Ux8 x y) // cond: // result: (ANDW (SRW (MOVBZreg x) y) (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [7]))) for { @@ -5659,7 +5659,7 @@ func rewriteValueS390X_OpRsh8x8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh8x8 x y) + // match: (Rsh8x8 x y) // cond: // result: (SRAW (MOVBreg x) (ORW y (NOTW (SUBEWcarrymask (CMPWUconst (MOVBZreg y) [7]))))) for { @@ -5724,9 +5724,9 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool { v.AddArg(x) return true } - // match: (ADD (SLDconst x [c]) (SRDconst x [d])) - // cond: d == 64-c - // result: (RLLGconst [c] x) + // match: (ADD (SLDconst x [c]) (SRDconst x [64-c])) + // cond: + // result: (RLLGconst [ c] x) for { v_0 := v.Args[0] if v_0.Op != OpS390XSLDconst { @@ -5738,11 +5738,10 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool { if v_1.Op != OpS390XSRDconst { break } - d := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 64-c { break } - if !(d == 64-c) { + if x != v_1.Args[0] { break } v.reset(OpS390XRLLGconst) @@ -5750,29 +5749,28 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool { v.AddArg(x) return true } - // match: (ADD (SRDconst x [d]) (SLDconst x [c])) - // cond: d == 64-c - // result: (RLLGconst [c] x) + // match: (ADD (SRDconst x [c]) (SLDconst x [64-c])) + // cond: + // result: (RLLGconst [64-c] x) for { v_0 := v.Args[0] if v_0.Op != OpS390XSRDconst { break } - d := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] v_1 := v.Args[1] if v_1.Op != OpS390XSLDconst { break } - c := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 64-c { break } - if !(d == 64-c) { + if x != v_1.Args[0] { break } v.reset(OpS390XRLLGconst) - v.AuxInt = c + v.AuxInt = 64 - c v.AddArg(x) return true } @@ -5798,7 +5796,7 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool { v.AddArg(y) return true } - // match: (ADD (MOVDaddr [c] {s} y) x) + // match: (ADD (MOVDaddr [c] {s} x) y) // cond: x.Op != OpSB && y.Op != OpSB // result: (MOVDaddridx [c] {s} x y) for { @@ -5808,8 +5806,8 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool { } c := v_0.AuxInt s := v_0.Aux - y := v_0.Args[0] - x := v.Args[1] + x := v_0.Args[0] + y := v.Args[1] if !(x.Op != OpSB && y.Op != OpSB) { break } @@ -5835,21 +5833,6 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool { v.AddArg(y) return true } - // match: (ADD (NEG y) x) - // cond: - // result: (SUB x y) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XNEG { - break - } - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpS390XSUB) - v.AddArg(x) - v.AddArg(y) - return true - } // match: (ADD x g:(MOVDload [off] {sym} ptr mem)) // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) // result: (ADDload [off] {sym} x ptr mem) @@ -5902,58 +5885,6 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool { v.AddArg(mem) return true } - // match: (ADD g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ADDload [off] {sym} x ptr mem) - for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && 
canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XADDload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ADD x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ADDload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XADDload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } return false } func rewriteValueS390X_OpS390XADDW(v *Value) bool { @@ -5987,9 +5918,9 @@ func rewriteValueS390X_OpS390XADDW(v *Value) bool { v.AddArg(x) return true } - // match: (ADDW (SLWconst x [c]) (SRWconst x [d])) - // cond: d == 32-c - // result: (RLLconst [c] x) + // match: (ADDW (SLWconst x [c]) (SRWconst x [32-c])) + // cond: + // result: (RLLconst [ c] x) for { v_0 := v.Args[0] if v_0.Op != OpS390XSLWconst { @@ -6001,11 +5932,10 @@ func rewriteValueS390X_OpS390XADDW(v *Value) bool { if v_1.Op != OpS390XSRWconst { break } - d := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 32-c { break } - if !(d == 32-c) { + if x != v_1.Args[0] { break } v.reset(OpS390XRLLconst) @@ -6013,29 +5943,28 @@ func rewriteValueS390X_OpS390XADDW(v *Value) bool { v.AddArg(x) return true } - // match: (ADDW (SRWconst x [d]) (SLWconst x [c])) - // cond: d == 32-c - // result: (RLLconst [c] x) + // match: (ADDW (SRWconst x [c]) (SLWconst x [32-c])) + // cond: + // result: (RLLconst [32-c] x) for { v_0 := v.Args[0] if v_0.Op != OpS390XSRWconst { break } - d := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] v_1 := v.Args[1] if v_1.Op != OpS390XSLWconst { break } - c := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 32-c { break } - if !(d == 32-c) { + if x != v_1.Args[0] { break } v.reset(OpS390XRLLconst) - v.AuxInt = c + v.AuxInt = 32 - c v.AddArg(x) return true } @@ -6054,21 +5983,6 @@ func rewriteValueS390X_OpS390XADDW(v *Value) bool { v.AddArg(y) return true } - // match: (ADDW (NEGW y) x) - // cond: - // result: (SUBW x y) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XNEGW { - break - } - y := v_0.Args[0] - x := v.Args[1] - v.reset(OpS390XSUBW) - v.AddArg(x) - v.AddArg(y) - return true - } // match: (ADDW x g:(MOVWload [off] {sym} ptr mem)) // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) @@ -6121,58 +6035,6 @@ func rewriteValueS390X_OpS390XADDW(v *Value) bool { v.AddArg(mem) return true } - // match: (ADDW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ADDWload [off] {sym} x ptr mem) - for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVWload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XADDWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ADDW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ADDWload [off] {sym} x ptr mem) - for { - t := v.Type - x := 
v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XADDWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } // match: (ADDW x g:(MOVWZload [off] {sym} ptr mem)) // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) // result: (ADDWload [off] {sym} x ptr mem) @@ -6225,58 +6087,6 @@ func rewriteValueS390X_OpS390XADDW(v *Value) bool { v.AddArg(mem) return true } - // match: (ADDW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ADDWload [off] {sym} x ptr mem) - for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XADDWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ADDW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ADDWload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XADDWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } return false } func rewriteValueS390X_OpS390XADDWconst(v *Value) bool { @@ -6480,22 +6290,6 @@ func rewriteValueS390X_OpS390XAND(v *Value) bool { v.AddArg(x) return true } - // match: (AND x (MOVDconst [0xFF])) - // cond: - // result: (MOVBZreg x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - if v_1.AuxInt != 0xFF { - break - } - v.reset(OpS390XMOVBZreg) - v.AddArg(x) - return true - } // match: (AND (MOVDconst [0xFF]) x) // cond: // result: (MOVBZreg x) @@ -6512,19 +6306,19 @@ func rewriteValueS390X_OpS390XAND(v *Value) bool { v.AddArg(x) return true } - // match: (AND x (MOVDconst [0xFFFF])) + // match: (AND x (MOVDconst [0xFF])) // cond: - // result: (MOVHZreg x) + // result: (MOVBZreg x) for { x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpS390XMOVDconst { break } - if v_1.AuxInt != 0xFFFF { + if v_1.AuxInt != 0xFF { break } - v.reset(OpS390XMOVHZreg) + v.reset(OpS390XMOVBZreg) v.AddArg(x) return true } @@ -6544,19 +6338,19 @@ func rewriteValueS390X_OpS390XAND(v *Value) bool { v.AddArg(x) return true } - // match: (AND x (MOVDconst [0xFFFFFFFF])) + // match: (AND x (MOVDconst [0xFFFF])) // cond: - // result: (MOVWZreg x) + // result: (MOVHZreg x) for { x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpS390XMOVDconst { break } - if v_1.AuxInt != 0xFFFFFFFF { + if v_1.AuxInt != 0xFFFF { break } - v.reset(OpS390XMOVWZreg) + v.reset(OpS390XMOVHZreg) v.AddArg(x) return true } @@ -6576,6 +6370,22 @@ func rewriteValueS390X_OpS390XAND(v *Value) bool { v.AddArg(x) return true } + // match: (AND x (MOVDconst [0xFFFFFFFF])) + // cond: + // result: (MOVWZreg x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + if v_1.AuxInt != 0xFFFFFFFF { + 
break + } + v.reset(OpS390XMOVWZreg) + v.AddArg(x) + return true + } // match: (AND (MOVDconst [c]) (MOVDconst [d])) // cond: // result: (MOVDconst [c&d]) @@ -6594,24 +6404,6 @@ func rewriteValueS390X_OpS390XAND(v *Value) bool { v.AuxInt = c & d return true } - // match: (AND (MOVDconst [d]) (MOVDconst [c])) - // cond: - // result: (MOVDconst [c&d]) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = c & d - return true - } // match: (AND x x) // cond: // result: x @@ -6677,58 +6469,6 @@ func rewriteValueS390X_OpS390XAND(v *Value) bool { v.AddArg(mem) return true } - // match: (AND g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ANDload [off] {sym} x ptr mem) - for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XANDload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (AND x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ANDload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XANDload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } return false } func rewriteValueS390X_OpS390XANDW(v *Value) bool { @@ -6827,58 +6567,6 @@ func rewriteValueS390X_OpS390XANDW(v *Value) bool { v.AddArg(mem) return true } - // match: (ANDW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ANDWload [off] {sym} x ptr mem) - for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVWload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XANDWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ANDW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ANDWload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XANDWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } // match: (ANDW x g:(MOVWZload [off] {sym} ptr mem)) // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) // result: (ANDWload [off] {sym} x ptr mem) @@ -6931,58 +6619,6 @@ func rewriteValueS390X_OpS390XANDW(v *Value) bool { v.AddArg(mem) return true } - // match: (ANDW g:(MOVWZload [off] {sym} ptr mem) x) - // 
cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ANDWload [off] {sym} x ptr mem) - for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XANDWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (ANDW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ANDWload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XANDWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } return false } func rewriteValueS390X_OpS390XANDWconst(v *Value) bool { @@ -7599,23 +7235,6 @@ func rewriteValueS390X_OpS390XCMPconst(v *Value) bool { return false } func rewriteValueS390X_OpS390XFADD(v *Value) bool { - // match: (FADD (FMUL y z) x) - // cond: - // result: (FMADD x y z) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XFMUL { - break - } - y := v_0.Args[0] - z := v_0.Args[1] - x := v.Args[1] - v.reset(OpS390XFMADD) - v.AddArg(x) - v.AddArg(y) - v.AddArg(z) - return true - } // match: (FADD x (FMUL y z)) // cond: // result: (FMADD x y z) @@ -7633,26 +7252,26 @@ func rewriteValueS390X_OpS390XFADD(v *Value) bool { v.AddArg(z) return true } - return false -} -func rewriteValueS390X_OpS390XFADDS(v *Value) bool { - // match: (FADDS (FMULS y z) x) + // match: (FADD (FMUL y z) x) // cond: - // result: (FMADDS x y z) + // result: (FMADD x y z) for { v_0 := v.Args[0] - if v_0.Op != OpS390XFMULS { + if v_0.Op != OpS390XFMUL { break } y := v_0.Args[0] z := v_0.Args[1] x := v.Args[1] - v.reset(OpS390XFMADDS) + v.reset(OpS390XFMADD) v.AddArg(x) v.AddArg(y) v.AddArg(z) return true } + return false +} +func rewriteValueS390X_OpS390XFADDS(v *Value) bool { // match: (FADDS x (FMULS y z)) // cond: // result: (FMADDS x y z) @@ -7670,10 +7289,27 @@ func rewriteValueS390X_OpS390XFADDS(v *Value) bool { v.AddArg(z) return true } + // match: (FADDS (FMULS y z) x) + // cond: + // result: (FMADDS x y z) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XFMULS { + break + } + y := v_0.Args[0] + z := v_0.Args[1] + x := v.Args[1] + v.reset(OpS390XFMADDS) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } return false } func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool { - // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) + // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is20Bit(off1+off2) // result: (FMOVDload [off1+off2] {sym} ptr mem) for { @@ -7979,7 +7615,7 @@ func rewriteValueS390X_OpS390XFMOVDstoreidx(v *Value) bool { return false } func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool { - // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) + // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is20Bit(off1+off2) // result: (FMOVSload [off1+off2] {sym} ptr mem) for { @@ -8379,7 +8015,7 @@ func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool { v.AddArg(x) return true } - // match: (MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem) + // match: 
(MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is20Bit(off1+off2) // result: (MOVBZload [off1+off2] {sym} ptr mem) for { @@ -8402,7 +8038,7 @@ func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // match: (MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { @@ -8501,28 +8137,6 @@ func rewriteValueS390X_OpS390XMOVBZloadidx(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBZloadidx [c] {sym} idx (ADDconst [d] ptr) mem) - // cond: - // result: (MOVBZloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - mem := v.Args[2] - v.reset(OpS390XMOVBZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } // match: (MOVBZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) // cond: // result: (MOVBZloadidx [c+d] {sym} ptr idx mem) @@ -8545,36 +8159,14 @@ func rewriteValueS390X_OpS390XMOVBZloadidx(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBZloadidx [c] {sym} (ADDconst [d] idx) ptr mem) - // cond: - // result: (MOVBZloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpS390XMOVBZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { - b := v.Block - _ = b - // match: (MOVBZreg x:(MOVDLT (MOVDconst [c]) (MOVDconst [d]) _)) - // cond: int64(uint8(c)) == c && int64(uint8(d)) == d - // result: (MOVDreg x) + return false +} +func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { + b := v.Block + _ = b + // match: (MOVBZreg x:(MOVDLT (MOVDconst [c]) (MOVDconst [d]) _)) + // cond: int64(uint8(c)) == c && int64(uint8(d)) == d + // result: (MOVDreg x) for { x := v.Args[0] if x.Op != OpS390XMOVDLT { @@ -8880,7 +8472,7 @@ func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVBload(v *Value) bool { - // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) + // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is20Bit(off1+off2) // result: (MOVBload [off1+off2] {sym} ptr mem) for { @@ -9055,7 +8647,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: is20Bit(off1+off2) // result: (MOVBstore [off1+off2] {sym} ptr val mem) for { @@ -9103,7 +8695,7 @@ func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) + // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { @@ -9653,30 +9245,6 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstoreidx [c] {sym} idx (ADDconst [d] ptr) val mem) - // cond: - // result: 
(MOVBstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpS390XMOVBstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } // match: (MOVBstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) // cond: // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem) @@ -9701,30 +9269,6 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstoreidx [c] {sym} (ADDconst [d] idx) ptr val mem) - // cond: - // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpS390XMOVBstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } // match: (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [8] w) mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVHstoreidx [i-1] {s} p idx w mem) @@ -9773,63 +9317,20 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} idx p (SRDconst [8] w) mem)) + // match: (MOVBstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [j+8] w) mem)) // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w mem) + // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) for { i := v.AuxInt s := v.Aux p := v.Args[0] idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { - break - } - if x_2.AuxInt != 8 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { + w0 := v.Args[2] + if w0.Op != OpS390XSRDconst { break } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p w x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] + j := w0.AuxInt + w := w0.Args[0] x := v.Args[3] if x.Op != OpS390XMOVBstoreidx { break @@ -9850,7 +9351,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { if x_2.Op != OpS390XSRDconst { break } - if x_2.AuxInt != 8 { + if x_2.AuxInt != j+8 { break } if w != x_2.Args[0] { @@ -9865,18 +9366,18 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.Aux = s v.AddArg(p) v.AddArg(idx) - v.AddArg(w) + v.AddArg(w0) v.AddArg(mem) return true } - // match: (MOVBstoreidx [i] {s} idx p w x:(MOVBstoreidx [i-1] {s} idx p (SRDconst [8] w) mem)) + // match: (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [8] w) mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVHstoreidx [i-1] {s} p idx w mem) for { i := v.AuxInt s := v.Aux - idx := v.Args[0] - p := v.Args[1] + p := v.Args[0] + idx := v.Args[1] w := v.Args[2] x := 
v.Args[3] if x.Op != OpS390XMOVBstoreidx { @@ -9888,14 +9389,14 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { if x.Aux != s { break } - if idx != x.Args[0] { + if p != x.Args[0] { break } - if p != x.Args[1] { + if idx != x.Args[1] { break } x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { + if x_2.Op != OpS390XSRWconst { break } if x_2.AuxInt != 8 { @@ -9917,7 +9418,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [j+8] w) mem)) + // match: (MOVBstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [j+8] w) mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) for { @@ -9926,7 +9427,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { p := v.Args[0] idx := v.Args[1] w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { + if w0.Op != OpS390XSRWconst { break } j := w0.AuxInt @@ -9948,7 +9449,7 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { break } x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { + if x_2.Op != OpS390XSRWconst { break } if x_2.AuxInt != j+8 { @@ -9970,20 +9471,22 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} idx p (SRDconst [j+8] w) mem)) + // match: (MOVBstoreidx [i] {s} p idx (SRDconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem)) // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) + // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) for { i := v.AuxInt s := v.Aux p := v.Args[0] idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { + v_2 := v.Args[2] + if v_2.Op != OpS390XSRDconst { break } - j := w0.AuxInt - w := w0.Args[0] + if v_2.AuxInt != 8 { + break + } + w := v_2.Args[0] x := v.Args[3] if x.Op != OpS390XMOVBstoreidx { break @@ -9994,49 +9497,42 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { if x.Aux != s { break } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { + if p != x.Args[0] { break } - if x_2.AuxInt != j+8 { + if idx != x.Args[1] { break } - if w != x_2.Args[0] { + if w != x.Args[2] { break } mem := x.Args[3] if !(x.Uses == 1 && clobber(x)) { break } - v.reset(OpS390XMOVHstoreidx) + v.reset(OpS390XMOVHBRstoreidx) v.AuxInt = i - 1 v.Aux = s v.AddArg(p) v.AddArg(idx) - v.AddArg(w0) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVBstoreidx [i] {s} idx p w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRDconst [j+8] w) mem)) + // match: (MOVBstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRDconst [j-8] w) mem)) // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) + // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) for { i := v.AuxInt s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRDconst { break } - j := w0.AuxInt - w := w0.Args[0] + j := v_2.AuxInt + w := v_2.Args[0] x := v.Args[3] if x.Op != OpS390XMOVBstoreidx { break @@ -10053,21 +9549,21 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { if idx != x.Args[1] { break } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { + w0 := x.Args[2] + if w0.Op != OpS390XSRDconst { break } - if 
x_2.AuxInt != j+8 { + if w0.AuxInt != j-8 { break } - if w != x_2.Args[0] { + if w != w0.Args[0] { break } mem := x.Args[3] if !(x.Uses == 1 && clobber(x)) { break } - v.reset(OpS390XMOVHstoreidx) + v.reset(OpS390XMOVHBRstoreidx) v.AuxInt = i - 1 v.Aux = s v.AddArg(p) @@ -10076,20 +9572,22 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstoreidx [i] {s} idx p w0:(SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} idx p (SRDconst [j+8] w) mem)) + // match: (MOVBstoreidx [i] {s} p idx (SRWconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem)) // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) + // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) for { i := v.AuxInt s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRWconst { break } - j := w0.AuxInt - w := w0.Args[0] + if v_2.AuxInt != 8 { + break + } + w := v_2.Args[0] x := v.Args[3] if x.Op != OpS390XMOVBstoreidx { break @@ -10100,44 +9598,42 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { if x.Aux != s { break } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { + if p != x.Args[0] { break } - if x_2.AuxInt != j+8 { + if idx != x.Args[1] { break } - if w != x_2.Args[0] { + if w != x.Args[2] { break } mem := x.Args[3] if !(x.Uses == 1 && clobber(x)) { break } - v.reset(OpS390XMOVHstoreidx) + v.reset(OpS390XMOVHBRstoreidx) v.AuxInt = i - 1 v.Aux = s v.AddArg(p) v.AddArg(idx) - v.AddArg(w0) + v.AddArg(w) v.AddArg(mem) return true } - // match: (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [8] w) mem)) + // match: (MOVBstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRWconst [j-8] w) mem)) // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w mem) + // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) for { i := v.AuxInt s := v.Aux p := v.Args[0] idx := v.Args[1] - w := v.Args[2] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRWconst { + break + } + j := v_2.AuxInt + w := v_2.Args[0] x := v.Args[3] if x.Op != OpS390XMOVBstoreidx { break @@ -10154,20470 +9650,5582 @@ func rewriteValueS390X_OpS390XMOVBstoreidx(v *Value) bool { if idx != x.Args[1] { break } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst { + w0 := x.Args[2] + if w0.Op != OpS390XSRWconst { break } - if x_2.AuxInt != 8 { + if w0.AuxInt != j-8 { break } - if w != x_2.Args[0] { + if w != w0.Args[0] { break } mem := x.Args[3] if !(x.Uses == 1 && clobber(x)) { break } - v.reset(OpS390XMOVHstoreidx) + v.reset(OpS390XMOVHBRstoreidx) v.AuxInt = i - 1 v.Aux = s v.AddArg(p) v.AddArg(idx) - v.AddArg(w) + v.AddArg(w0) v.AddArg(mem) return true } - // match: (MOVBstoreidx [i] {s} p idx w x:(MOVBstoreidx [i-1] {s} idx p (SRWconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w mem) + return false +} +func rewriteValueS390X_OpS390XMOVDEQ(v *Value) bool { + // match: (MOVDEQ x y (InvertFlags cmp)) + // cond: + // result: (MOVDEQ x y cmp) for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XInvertFlags { break } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] 
{ - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst { - break - } - if x_2.AuxInt != 8 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p w x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst { - break - } - if x_2.AuxInt != 8 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p w x:(MOVBstoreidx [i-1] {s} idx p (SRWconst [8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst { - break - } - if x_2.AuxInt != 8 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [j+8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst { - break - } - if x_2.AuxInt != j+8 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} idx p (SRWconst [j+8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst { - break - } - if 
x_2.AuxInt != j+8 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p w0:(SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx (SRWconst [j+8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst { - break - } - if x_2.AuxInt != j+8 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p w0:(SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} idx p (SRWconst [j+8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst { - break - } - if x_2.AuxInt != j+8 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} p idx (SRDconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - if v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} p idx (SRDconst [8] w) x:(MOVBstoreidx [i-1] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - if v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := 
x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p (SRDconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - if v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p (SRDconst [8] w) x:(MOVBstoreidx [i-1] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - if v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRDconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - if w0.AuxInt != j-8 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} idx p w0:(SRDconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - if w0.AuxInt != j-8 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - 
v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p (SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRDconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - if w0.AuxInt != j-8 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p (SRDconst [j] w) x:(MOVBstoreidx [i-1] {s} idx p w0:(SRDconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - if w0.AuxInt != j-8 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} p idx (SRWconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - if v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} p idx (SRWconst [8] w) x:(MOVBstoreidx [i-1] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - if v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - 
v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p (SRWconst [8] w) x:(MOVBstoreidx [i-1] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - if v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p (SRWconst [8] w) x:(MOVBstoreidx [i-1] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - if v_2.AuxInt != 8 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRWconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - if w0.AuxInt != j-8 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} idx p w0:(SRWconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - if w0.AuxInt != j-8 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return 
true - } - // match: (MOVBstoreidx [i] {s} idx p (SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} p idx w0:(SRWconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - if w0.AuxInt != j-8 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVBstoreidx [i] {s} idx p (SRWconst [j] w) x:(MOVBstoreidx [i-1] {s} idx p w0:(SRWconst [j-8] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVHBRstoreidx [i-1] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVBstoreidx { - break - } - if x.AuxInt != i-1 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - if w0.AuxInt != j-8 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVHBRstoreidx) - v.AuxInt = i - 1 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVDEQ(v *Value) bool { - // match: (MOVDEQ x y (InvertFlags cmp)) - // cond: - // result: (MOVDEQ x y cmp) - for { - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XInvertFlags { - break - } - cmp := v_2.Args[0] - v.reset(OpS390XMOVDEQ) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cmp) - return true - } - // match: (MOVDEQ _ x (FlagEQ)) - // cond: - // result: x - for { - x := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XFlagEQ { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MOVDEQ y _ (FlagLT)) - // cond: - // result: y - for { - y := v.Args[0] - v_2 := v.Args[2] - if v_2.Op != OpS390XFlagLT { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (MOVDEQ y _ (FlagGT)) - // cond: - // result: y - for { - y := v.Args[0] - v_2 := v.Args[2] - if v_2.Op != OpS390XFlagGT { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVDGE(v *Value) bool { - // match: (MOVDGE x y (InvertFlags cmp)) - // cond: - // result: (MOVDLE x y cmp) - for { - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XInvertFlags { - break - } - cmp := v_2.Args[0] - v.reset(OpS390XMOVDLE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cmp) - return true - } - // match: (MOVDGE _ x (FlagEQ)) - // cond: - // result: x - for { - x := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XFlagEQ { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MOVDGE y _ (FlagLT)) - // cond: - // result: y - for { - y := v.Args[0] - v_2 
:= v.Args[2] - if v_2.Op != OpS390XFlagLT { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (MOVDGE _ x (FlagGT)) - // cond: - // result: x - for { - x := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XFlagGT { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVDGT(v *Value) bool { - // match: (MOVDGT x y (InvertFlags cmp)) - // cond: - // result: (MOVDLT x y cmp) - for { - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XInvertFlags { - break - } - cmp := v_2.Args[0] - v.reset(OpS390XMOVDLT) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cmp) - return true - } - // match: (MOVDGT y _ (FlagEQ)) - // cond: - // result: y - for { - y := v.Args[0] - v_2 := v.Args[2] - if v_2.Op != OpS390XFlagEQ { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (MOVDGT y _ (FlagLT)) - // cond: - // result: y - for { - y := v.Args[0] - v_2 := v.Args[2] - if v_2.Op != OpS390XFlagLT { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (MOVDGT _ x (FlagGT)) - // cond: - // result: x - for { - x := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XFlagGT { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVDLE(v *Value) bool { - // match: (MOVDLE x y (InvertFlags cmp)) - // cond: - // result: (MOVDGE x y cmp) - for { - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XInvertFlags { - break - } - cmp := v_2.Args[0] - v.reset(OpS390XMOVDGE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cmp) - return true - } - // match: (MOVDLE _ x (FlagEQ)) - // cond: - // result: x - for { - x := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XFlagEQ { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MOVDLE _ x (FlagLT)) - // cond: - // result: x - for { - x := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XFlagLT { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MOVDLE y _ (FlagGT)) - // cond: - // result: y - for { - y := v.Args[0] - v_2 := v.Args[2] - if v_2.Op != OpS390XFlagGT { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVDLT(v *Value) bool { - // match: (MOVDLT x y (InvertFlags cmp)) - // cond: - // result: (MOVDGT x y cmp) - for { - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XInvertFlags { - break - } - cmp := v_2.Args[0] - v.reset(OpS390XMOVDGT) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cmp) - return true - } - // match: (MOVDLT y _ (FlagEQ)) - // cond: - // result: y - for { - y := v.Args[0] - v_2 := v.Args[2] - if v_2.Op != OpS390XFlagEQ { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (MOVDLT _ x (FlagLT)) - // cond: - // result: x - for { - x := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XFlagLT { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MOVDLT y _ (FlagGT)) - // cond: - // result: y - for { - y := v.Args[0] - v_2 := v.Args[2] - if v_2.Op != OpS390XFlagGT { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVDNE(v *Value) bool { - // match: (MOVDNE x y (InvertFlags cmp)) - // cond: - // result: (MOVDNE x y cmp) - 
for { - x := v.Args[0] - y := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XInvertFlags { - break - } - cmp := v_2.Args[0] - v.reset(OpS390XMOVDNE) - v.AddArg(x) - v.AddArg(y) - v.AddArg(cmp) - return true - } - // match: (MOVDNE y _ (FlagEQ)) - // cond: - // result: y - for { - y := v.Args[0] - v_2 := v.Args[2] - if v_2.Op != OpS390XFlagEQ { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (MOVDNE _ x (FlagLT)) - // cond: - // result: x - for { - x := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XFlagLT { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MOVDNE _ x (FlagGT)) - // cond: - // result: x - for { - x := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XFlagGT { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool { - // match: (MOVDaddridx [c] {s} (ADDconst [d] x) y) - // cond: is20Bit(c+d) && x.Op != OpSB - // result: (MOVDaddridx [c+d] {s} x y) - for { - c := v.AuxInt - s := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - y := v.Args[1] - if !(is20Bit(c+d) && x.Op != OpSB) { - break - } - v.reset(OpS390XMOVDaddridx) - v.AuxInt = c + d - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (MOVDaddridx [c] {s} x (ADDconst [d] y)) - // cond: is20Bit(c+d) && y.Op != OpSB - // result: (MOVDaddridx [c+d] {s} x y) - for { - c := v.AuxInt - s := v.Aux - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - y := v_1.Args[0] - if !(is20Bit(c+d) && y.Op != OpSB) { - break - } - v.reset(OpS390XMOVDaddridx) - v.AuxInt = c + d - v.Aux = s - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB - // result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - x := v_0.Args[0] - y := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { - break - } - v.reset(OpS390XMOVDaddridx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB - // result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y) - for { - off1 := v.AuxInt - sym1 := v.Aux - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDaddr { - break - } - off2 := v_1.AuxInt - sym2 := v_1.Aux - y := v_1.Args[0] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) { - break - } - v.reset(OpS390XMOVDaddridx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(x) - v.AddArg(y) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVDload(v *Value) bool { - // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: (MOVDreg x) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDstore { - break - } - off2 := v_1.AuxInt - sym2 := v_1.Aux - ptr2 := v_1.Args[0] - x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { - break - 
} - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is20Bit(off1+off2) - // result: (MOVDload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is20Bit(off1 + off2)) { - break - } - v.reset(OpS390XMOVDload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVDload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) - return true - } - // match: (MOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddridx { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVDloadidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVDload [off] {sym} (ADD ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVDloadidx [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVDloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVDloadidx(v *Value) bool { - // match: (MOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: - // result: (MOVDloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpS390XMOVDloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVDloadidx [c] {sym} idx (ADDconst [d] ptr) mem) - // cond: - // result: (MOVDloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - mem := v.Args[2] - v.reset(OpS390XMOVDloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: - // result: (MOVDloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpS390XMOVDloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - 
v.AddArg(mem) - return true - } - // match: (MOVDloadidx [c] {sym} (ADDconst [d] idx) ptr mem) - // cond: - // result: (MOVDloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpS390XMOVDloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVDnop(v *Value) bool { - b := v.Block - _ = b - // match: (MOVDnop x) - // cond: t.Compare(x.Type) == CMPeq - // result: x - for { - t := v.Type - x := v.Args[0] - if !(t.Compare(x.Type) == CMPeq) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MOVDnop (MOVDconst [c])) - // cond: - // result: (MOVDconst [c]) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = c - return true - } - // match: (MOVDnop x:(MOVBZload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVBZload [off] {sym} ptr mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVBZload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVDnop x:(MOVBload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVBload [off] {sym} ptr mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVBload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVBload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVDnop x:(MOVHZload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVHZload [off] {sym} ptr mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVHZload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVDnop x:(MOVHload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVHload [off] {sym} ptr mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVHload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVDnop x:(MOVWZload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWZload [off] {sym} ptr mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVWZload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, t) - 
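// The MOVDnop rules here (and the MOVDreg rules that follow) absorb a load
// only under x.Uses == 1 && clobber(x), and the @x.Block result form
// rebuilds the load in its original block so memory ordering is preserved.
// A rough, hypothetical model of that single-use guard — not the ssa
// package's real types:
package main

import "fmt"

// value models just enough of an SSA value for the sketch: its opcode,
// a use count and the block it lives in.
type value struct {
	op    string
	uses  int
	block string
}

// absorbLoad mimics the guard in the generated rule: the load may be
// folded into its sole user only when nothing else still reads it, and
// the replacement stays in the load's original block.
func absorbLoad(load *value) (*value, bool) {
	if load.uses != 1 {
		return nil, false // another user still needs the load's result
	}
	return &value{op: load.op, uses: 1, block: load.block}, true
}

func main() {
	shared := &value{op: "MOVWZload", uses: 2, block: "b3"}
	single := &value{op: "MOVWZload", uses: 1, block: "b3"}

	_, ok1 := absorbLoad(shared)
	rep, ok2 := absorbLoad(single)
	fmt.Println(ok1, ok2, rep.block) // false true b3
}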
v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVDnop x:(MOVWload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWload [off] {sym} ptr mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVWload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVDnop x:(MOVDload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVDload [off] {sym} ptr mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVDload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVDnop x:(MOVBZloadidx [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVBZloadidx [off] {sym} ptr idx mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVBZloadidx { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - idx := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVBZloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (MOVDnop x:(MOVHZloadidx [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVHZloadidx [off] {sym} ptr idx mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVHZloadidx { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - idx := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (MOVDnop x:(MOVWZloadidx [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWZloadidx [off] {sym} ptr idx mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVWZloadidx { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - idx := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (MOVDnop x:(MOVDloadidx [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVDloadidx [off] {sym} ptr idx mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVDloadidx { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - idx := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func 
rewriteValueS390X_OpS390XMOVDreg(v *Value) bool { - b := v.Block - _ = b - // match: (MOVDreg x) - // cond: t.Compare(x.Type) == CMPeq - // result: x - for { - t := v.Type - x := v.Args[0] - if !(t.Compare(x.Type) == CMPeq) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MOVDreg (MOVDconst [c])) - // cond: - // result: (MOVDconst [c]) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = c - return true - } - // match: (MOVDreg x) - // cond: x.Uses == 1 - // result: (MOVDnop x) - for { - x := v.Args[0] - if !(x.Uses == 1) { - break - } - v.reset(OpS390XMOVDnop) - v.AddArg(x) - return true - } - // match: (MOVDreg x:(MOVBZload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVBZload [off] {sym} ptr mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVBZload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVDreg x:(MOVBload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVBload [off] {sym} ptr mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVBload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVBload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVDreg x:(MOVHZload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVHZload [off] {sym} ptr mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVHZload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVDreg x:(MOVHload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVHload [off] {sym} ptr mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVHload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVDreg x:(MOVWZload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWZload [off] {sym} ptr mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVWZload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVDreg x:(MOVWload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWload [off] {sym} ptr mem) - for { - t := v.Type - x := v.Args[0] - if 
x.Op != OpS390XMOVWload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVDreg x:(MOVDload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVDload [off] {sym} ptr mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVDload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVDload, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVDreg x:(MOVBZloadidx [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVBZloadidx [off] {sym} ptr idx mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVBZloadidx { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - idx := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVBZloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (MOVDreg x:(MOVHZloadidx [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVHZloadidx [off] {sym} ptr idx mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVHZloadidx { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - idx := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (MOVDreg x:(MOVWZloadidx [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWZloadidx [off] {sym} ptr idx mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVWZloadidx { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - idx := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (MOVDreg x:(MOVDloadidx [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVDloadidx [off] {sym} ptr idx mem) - for { - t := v.Type - x := v.Args[0] - if x.Op != OpS390XMOVDloadidx { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - idx := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, t) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { - // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) - // cond: is20Bit(off1+off2) - // result: (MOVDstore [off1+off2] {sym} ptr val mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - off2 := v_0.AuxInt - 
ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is20Bit(off1 + off2)) { - break - } - v.reset(OpS390XMOVDstore) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) - // cond: validValAndOff(c,off) && int64(int16(c)) == c && ptr.Op != OpSB - // result: (MOVDstoreconst [makeValAndOff(c,off)] {sym} ptr mem) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - c := v_1.AuxInt - mem := v.Args[2] - if !(validValAndOff(c, off) && int64(int16(c)) == c && ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVDstoreconst) - v.AuxInt = makeValAndOff(c, off) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVDstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddridx { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVDstore [off] {sym} (ADD ptr idx) val mem) - // cond: ptr.Op != OpSB - // result: (MOVDstoreidx [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem)) - // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(i-8) && clobber(x) - // result: (STMG2 [i-8] {s} p w0 w1 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - w1 := v.Args[1] - x := v.Args[2] - if x.Op != OpS390XMOVDstore { - break - } - if x.AuxInt != i-8 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - w0 := x.Args[1] - mem := x.Args[2] - if !(p.Op != OpSB && x.Uses == 1 && is20Bit(i-8) && clobber(x)) { - break - } - v.reset(OpS390XSTMG2) - v.AuxInt = i - 8 - v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(mem) - return true - } - // match: (MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem)) - // cond: x.Uses == 1 && is20Bit(i-16) && clobber(x) - // result: (STMG3 [i-16] {s} p w0 w1 w2 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - w2 := v.Args[1] - x := v.Args[2] 
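// The STMG rules here merge adjacent 8-byte stores (at i-8, i-16, ...
// relative to the newest one) into a single store-multiple; s390x is
// big-endian and STMG writes consecutive registers to ascending addresses.
// A self-contained check, in plain Go arithmetic rather than compiler
// code, that the two forms write the same bytes:
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	const i = 8 // offsets as in (MOVDstore [i] p w1 x:(MOVDstore [i-8] p w0 mem))
	w0, w1 := uint64(0x1111111111111111), uint64(0x2222222222222222)

	// Two separate MOVDstores: w0 at i-8, then w1 at i.
	sep := make([]byte, 16)
	binary.BigEndian.PutUint64(sep[i-8:], w0)
	binary.BigEndian.PutUint64(sep[i:], w1)

	// One STMG2 [i-8]: both registers written to ascending addresses.
	stm := make([]byte, 16)
	for k, w := range []uint64{w0, w1} {
		binary.BigEndian.PutUint64(stm[i-8+8*k:], w)
	}

	fmt.Println(bytes.Equal(sep, stm)) // true: the merge preserves the stored bytes
}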
- if x.Op != OpS390XSTMG2 { - break - } - if x.AuxInt != i-16 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - w0 := x.Args[1] - w1 := x.Args[2] - mem := x.Args[3] - if !(x.Uses == 1 && is20Bit(i-16) && clobber(x)) { - break - } - v.reset(OpS390XSTMG3) - v.AuxInt = i - 16 - v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(w2) - v.AddArg(mem) - return true - } - // match: (MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem)) - // cond: x.Uses == 1 && is20Bit(i-24) && clobber(x) - // result: (STMG4 [i-24] {s} p w0 w1 w2 w3 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - w3 := v.Args[1] - x := v.Args[2] - if x.Op != OpS390XSTMG3 { - break - } - if x.AuxInt != i-24 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - w0 := x.Args[1] - w1 := x.Args[2] - w2 := x.Args[3] - mem := x.Args[4] - if !(x.Uses == 1 && is20Bit(i-24) && clobber(x)) { - break - } - v.reset(OpS390XSTMG4) - v.AuxInt = i - 24 - v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(w2) - v.AddArg(w3) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value) bool { - // match: (MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVDstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) - for { - sc := v.AuxInt - s := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - off := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(ValAndOff(sc).canAdd(off)) { - break - } - v.reset(OpS390XMOVDstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVDstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) - for { - sc := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddr { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { - break - } - v.reset(OpS390XMOVDstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVDstoreidx(v *Value) bool { - // match: (MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: - // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVDstoreidx [c] {sym} idx (ADDconst [d] ptr) val mem) - // cond: - // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: - // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - 
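// The MOVDstoreconst rules above carry both the constant and the store
// offset in one AuxInt via ValAndOff; canAdd/add only succeed while the
// combined offset still fits in 32 bits. A simplified, assumed model of
// that packing — the real type lives in the ssa package and may differ in
// detail:
package main

import "fmt"

// valAndOff packs a 32-bit value in the high half and a 32-bit offset in
// the low half of one int64, so a single AuxInt can describe
// "store this constant at this offset".
type valAndOff int64

func makeValAndOff(val, off int64) valAndOff {
	return valAndOff(val<<32 | int64(uint32(off)))
}

func (x valAndOff) Val() int64 { return int64(x) >> 32 }
func (x valAndOff) Off() int64 { return int64(int32(x)) }

// canAdd reports whether the offset can grow by off without overflowing
// the 32 bits reserved for it.
func (x valAndOff) canAdd(off int64) bool {
	n := x.Off() + off
	return n == int64(int32(n))
}

func main() {
	sc := makeValAndOff(42, 100)    // e.g. (MOVDstoreconst [makeValAndOff(c,off)] ...)
	fmt.Println(sc.Val(), sc.Off()) // 42 100
	fmt.Println(sc.canAdd(8))       // true: an ADDconst [8] can fold into the store
	fmt.Println(sc.canAdd(1 << 40)) // false: the offset would no longer fit
}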
ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVDstoreidx [c] {sym} (ADDconst [d] idx) ptr val mem) - // cond: - // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { - // match: (MOVHBRstore [i] {s} p (SRDconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstore [i-2] {s} p w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSRDconst { - break - } - if v_1.AuxInt != 16 { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpS390XMOVHBRstore { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if w != x.Args[1] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstore) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHBRstore [i] {s} p (SRDconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRDconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstore [i-2] {s} p w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSRDconst { - break - } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpS390XMOVHBRstore { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - w0 := x.Args[1] - if w0.Op != OpS390XSRDconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstore) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHBRstore [i] {s} p (SRWconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstore [i-2] {s} p w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSRWconst { - break - } - if v_1.AuxInt != 16 { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpS390XMOVHBRstore { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if w != x.Args[1] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstore) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHBRstore [i] {s} p (SRWconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRWconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstore [i-2] {s} p w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSRWconst { - break - } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpS390XMOVHBRstore { - 
break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - w0 := x.Args[1] - if w0.Op != OpS390XSRWconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstore) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { - // match: (MOVHBRstoreidx [i] {s} p idx (SRDconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - if v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} p idx (SRDconst [16] w) x:(MOVHBRstoreidx [i-2] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - if v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} idx p (SRDconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - if v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} idx p (SRDconst [16] w) x:(MOVHBRstoreidx [i-2] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - if v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p 
!= x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRDconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVHBRstoreidx [i-2] {s} idx p w0:(SRDconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} idx p (SRDconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRDconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} idx p (SRDconst [j] w) x:(MOVHBRstoreidx [i-2] {s} idx p w0:(SRDconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if 
x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} p idx (SRWconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - if v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} p idx (SRWconst [16] w) x:(MOVHBRstoreidx [i-2] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - if v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} idx p (SRWconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - if v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} idx p (SRWconst [16] w) x:(MOVHBRstoreidx [i-2] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - if v_2.AuxInt != 16 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := 
x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRWconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVHBRstoreidx [i-2] {s} idx p w0:(SRWconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} idx p (SRWconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRWconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHBRstoreidx [i] {s} idx p (SRWconst [j] w) x:(MOVHBRstoreidx [i-2] {s} idx p w0:(SRWconst [j-16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRWconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHBRstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p 
!= x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - if w0.AuxInt != j-16 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWBRstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool { - // match: (MOVHZload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: (MOVHZreg x) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVHstore { - break - } - off2 := v_1.AuxInt - sym2 := v_1.Aux - ptr2 := v_1.Args[0] - x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { - break - } - v.reset(OpS390XMOVHZreg) - v.AddArg(x) - return true - } - // match: (MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is20Bit(off1+off2) - // result: (MOVHZload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is20Bit(off1 + off2)) { - break - } - v.reset(OpS390XMOVHZload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVHZload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) - return true - } - // match: (MOVHZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVHZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddridx { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVHZloadidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVHZload [off] {sym} (ADD ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVHZloadidx [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVHZloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVHZloadidx(v *Value) bool { - // match: (MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: - // result: (MOVHZloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpS390XMOVHZloadidx) - v.AuxInt = c + d - v.Aux = sym - 
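// MOVBZload and MOVHZload, handled above, already zero-extend into the
// full 64-bit register, so the MOVHZreg rules further down drop the
// redundant extension (rewriting to MOVDreg) and fold it directly on
// constants as int64(uint16(c)). A quick check of the arithmetic those
// folds rely on:
package main

import "fmt"

func main() {
	// (MOVHZreg (MOVDconst [c])) -> (MOVDconst [int64(uint16(c))]):
	// zero-extending a constant just masks it to its low 16 bits.
	c := int64(-1)
	fmt.Println(int64(uint16(c))) // 65535

	// A zero-extending halfword load leaves nothing above bit 15 set,
	// so re-extending the loaded value changes nothing.
	loaded := uint64(uint16(0xbeef))     // what MOVHZload produces, modeled in Go
	fmt.Println(loaded == loaded&0xffff) // true
}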
v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVHZloadidx [c] {sym} idx (ADDconst [d] ptr) mem) - // cond: - // result: (MOVHZloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - mem := v.Args[2] - v.reset(OpS390XMOVHZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVHZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: - // result: (MOVHZloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpS390XMOVHZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVHZloadidx [c] {sym} (ADDconst [d] idx) ptr mem) - // cond: - // result: (MOVHZloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpS390XMOVHZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { - b := v.Block - _ = b - // match: (MOVHZreg x:(MOVBZload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVBZload { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHZreg x:(MOVHZload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVHZload { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHZreg x:(Arg )) - // cond: (is8BitInt(t) || is16BitInt(t)) && !isSigned(t) - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpArg { - break - } - t := x.Type - if !((is8BitInt(t) || is16BitInt(t)) && !isSigned(t)) { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHZreg x:(MOVBZreg _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVBZreg { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHZreg x:(MOVHZreg _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVHZreg { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHZreg (MOVDconst [c])) - // cond: - // result: (MOVDconst [int64(uint16(c))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = int64(uint16(c)) - return true - } - // match: (MOVHZreg x:(MOVHZload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVHZload [off] {sym} ptr mem) - for { - x := v.Args[0] - if x.Op != OpS390XMOVHZload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVHZreg x:(MOVHZloadidx [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVHZloadidx [off] {sym} ptr idx mem) - for { - x 
:= v.Args[0] - if x.Op != OpS390XMOVHZloadidx { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - idx := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVHload(v *Value) bool { - // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is20Bit(off1+off2) - // result: (MOVHload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is20Bit(off1 + off2)) { - break - } - v.reset(OpS390XMOVHload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVHload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { - b := v.Block - _ = b - // match: (MOVHreg x:(MOVBload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVBload { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHreg x:(MOVBZload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVBZload { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHreg x:(MOVHload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVHload { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHreg x:(Arg )) - // cond: (is8BitInt(t) || is16BitInt(t)) && isSigned(t) - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpArg { - break - } - t := x.Type - if !((is8BitInt(t) || is16BitInt(t)) && isSigned(t)) { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHreg x:(MOVBreg _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVBreg { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHreg x:(MOVBZreg _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVBZreg { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHreg x:(MOVHreg _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVHreg { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVHreg (MOVDconst [c])) - // cond: - // result: (MOVDconst [int64(int16(c))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = int64(int16(c)) - return true - } - // match: (MOVHreg x:(MOVHZload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVHload [off] {sym} ptr mem) - for { - x 
:= v.Args[0] - if x.Op != OpS390XMOVHZload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVHload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { - // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) - // cond: - // result: (MOVHstore [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVHreg { - break - } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpS390XMOVHstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem) - // cond: - // result: (MOVHstore [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVHZreg { - break - } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpS390XMOVHstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) - // cond: is20Bit(off1+off2) - // result: (MOVHstore [off1+off2] {sym} ptr val mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is20Bit(off1 + off2)) { - break - } - v.reset(OpS390XMOVHstore) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) - // cond: validOff(off) && ptr.Op != OpSB - // result: (MOVHstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - c := v_1.AuxInt - mem := v.Args[2] - if !(validOff(off) && ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVHstoreconst) - v.AuxInt = makeValAndOff(int64(int16(c)), off) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVHstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVHstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVHstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddridx { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - 
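// The MOVHstore rules below combine a halfword store of w at [i] with an
// earlier store of (SRDconst [16] w) at [i-2] into one MOVWstore at
// [i-2]: because s390x stores big-endian, the shifted half lands at the
// lower address and the byte image is unchanged. A plain-Go check of
// that layout:
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	const i = 2
	w := uint32(0x11223344)

	// Two MOVHstores: bits 31..16 of w at i-2, bits 15..0 of w at i.
	halves := make([]byte, 4)
	binary.BigEndian.PutUint16(halves[i-2:], uint16(w>>16)) // (SRDconst [16] w)
	binary.BigEndian.PutUint16(halves[i:], uint16(w))

	// One MOVWstore [i-2] of w.
	word := make([]byte, 4)
	binary.BigEndian.PutUint32(word[i-2:], w)

	fmt.Println(bytes.Equal(halves, word)) // true: the pair collapses into one store
}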
v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVHstore [off] {sym} (ADD ptr idx) val mem) - // cond: ptr.Op != OpSB - // result: (MOVHstoreidx [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRDconst [16] w) mem)) - // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) - // result: (MOVWstore [i-2] {s} p w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - w := v.Args[1] - x := v.Args[2] - if x.Op != OpS390XMOVHstore { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - x_1 := x.Args[1] - if x_1.Op != OpS390XSRDconst { - break - } - if x_1.AuxInt != 16 { - break - } - if w != x_1.Args[0] { - break - } - mem := x.Args[2] - if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstore) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHstore [i] {s} p w0:(SRDconst [j] w) x:(MOVHstore [i-2] {s} p (SRDconst [j+16] w) mem)) - // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) - // result: (MOVWstore [i-2] {s} p w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - w0 := v.Args[1] - if w0.Op != OpS390XSRDconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[2] - if x.Op != OpS390XMOVHstore { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - x_1 := x.Args[1] - if x_1.Op != OpS390XSRDconst { - break - } - if x_1.AuxInt != j+16 { - break - } - if w != x_1.Args[0] { - break - } - mem := x.Args[2] - if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstore) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRWconst [16] w) mem)) - // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) - // result: (MOVWstore [i-2] {s} p w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - w := v.Args[1] - x := v.Args[2] - if x.Op != OpS390XMOVHstore { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - x_1 := x.Args[1] - if x_1.Op != OpS390XSRWconst { - break - } - if x_1.AuxInt != 16 { - break - } - if w != x_1.Args[0] { - break - } - mem := x.Args[2] - if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstore) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHstore [i] {s} p w0:(SRWconst [j] w) x:(MOVHstore [i-2] {s} p (SRWconst [j+16] w) mem)) - // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) - // result: (MOVWstore [i-2] {s} p w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - w0 := v.Args[1] - if w0.Op != OpS390XSRWconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[2] - if x.Op != OpS390XMOVHstore { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - x_1 := x.Args[1] - if x_1.Op != OpS390XSRWconst { - break - } - if x_1.AuxInt != j+16 { - break - } - if w != 
x_1.Args[0] { - break - } - mem := x.Args[2] - if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstore) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool { - // match: (MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVHstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) - for { - sc := v.AuxInt - s := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - off := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(ValAndOff(sc).canAdd(off)) { - break - } - v.reset(OpS390XMOVHstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVHstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) - for { - sc := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddr { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { - break - } - v.reset(OpS390XMOVHstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem)) - // cond: p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) - // result: (MOVWstoreconst [makeValAndOff(ValAndOff(c).Val()&0xffff | ValAndOff(a).Val()<<16, ValAndOff(a).Off())] {s} p mem) - for { - c := v.AuxInt - s := v.Aux - p := v.Args[0] - x := v.Args[1] - if x.Op != OpS390XMOVHstoreconst { - break - } - a := x.AuxInt - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - mem := x.Args[1] - if !(p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreconst) - v.AuxInt = makeValAndOff(ValAndOff(c).Val()&0xffff|ValAndOff(a).Val()<<16, ValAndOff(a).Off()) - v.Aux = s - v.AddArg(p) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { - // match: (MOVHstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: - // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [c] {sym} idx (ADDconst [d] ptr) val mem) - // cond: - // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: - // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { 
- break - } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [c] {sym} (ADDconst [d] idx) ptr val mem) - // cond: - // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpS390XMOVHstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { - break - } - if x_2.AuxInt != 16 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} idx p (SRDconst [16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { - break - } - if x_2.AuxInt != 16 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} idx p w x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { - break - } - if x_2.AuxInt != 16 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} idx p w x:(MOVHstoreidx [i-2] {s} idx p (SRDconst [16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != 
s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { - break - } - if x_2.AuxInt != 16 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [j+16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { - break - } - if x_2.AuxInt != j+16 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} idx p (SRDconst [j+16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { - break - } - if x_2.AuxInt != j+16 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} idx p w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [j+16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { - break - } - if x_2.AuxInt != j+16 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} idx p w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} idx p (SRDconst [j+16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - j := w0.AuxInt - w 
:= w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { - break - } - if x_2.AuxInt != j+16 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst { - break - } - if x_2.AuxInt != 16 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} idx p (SRWconst [16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst { - break - } - if x_2.AuxInt != 16 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} idx p w x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst { - break - } - if x_2.AuxInt != 16 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} idx p w x:(MOVHstoreidx [i-2] {s} idx p (SRWconst [16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst { - break - } - if 
x_2.AuxInt != 16 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [j+16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst { - break - } - if x_2.AuxInt != j+16 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVHstoreidx [i-2] {s} idx p (SRWconst [j+16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst { - break - } - if x_2.AuxInt != j+16 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} idx p w0:(SRWconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [j+16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst { - break - } - if x_2.AuxInt != j+16 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVHstoreidx [i] {s} idx p w0:(SRWconst [j] w) x:(MOVHstoreidx [i-2] {s} idx p (SRWconst [j+16] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRWconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVHstoreidx { - break - } - if x.AuxInt != i-2 { - break - } - if x.Aux != s { - break - } - if idx != 
x.Args[0] { - break - } - if p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRWconst { - break - } - if x_2.AuxInt != j+16 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = i - 2 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVWBRstore(v *Value) bool { - // match: (MOVWBRstore [i] {s} p (SRDconst [32] w) x:(MOVWBRstore [i-4] {s} p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDBRstore [i-4] {s} p w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSRDconst { - break - } - if v_1.AuxInt != 32 { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpS390XMOVWBRstore { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if w != x.Args[1] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDBRstore) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWBRstore [i] {s} p (SRDconst [j] w) x:(MOVWBRstore [i-4] {s} p w0:(SRDconst [j-32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDBRstore [i-4] {s} p w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSRDconst { - break - } - j := v_1.AuxInt - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpS390XMOVWBRstore { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - w0 := x.Args[1] - if w0.Op != OpS390XSRDconst { - break - } - if w0.AuxInt != j-32 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDBRstore) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVWBRstoreidx(v *Value) bool { - // match: (MOVWBRstoreidx [i] {s} p idx (SRDconst [32] w) x:(MOVWBRstoreidx [i-4] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDBRstoreidx [i-4] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - if v_2.AuxInt != 32 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWBRstoreidx { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWBRstoreidx [i] {s} p idx (SRDconst [32] w) x:(MOVWBRstoreidx [i-4] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDBRstoreidx [i-4] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - if v_2.AuxInt != 32 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWBRstoreidx { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - if w != 
x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWBRstoreidx [i] {s} idx p (SRDconst [32] w) x:(MOVWBRstoreidx [i-4] {s} p idx w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDBRstoreidx [i-4] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - if v_2.AuxInt != 32 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWBRstoreidx { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWBRstoreidx [i] {s} idx p (SRDconst [32] w) x:(MOVWBRstoreidx [i-4] {s} idx p w mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDBRstoreidx [i-4] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - if v_2.AuxInt != 32 { - break - } - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWBRstoreidx { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - if w != x.Args[2] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVWBRstoreidx [i-4] {s} p idx w0:(SRDconst [j-32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDBRstoreidx [i-4] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWBRstoreidx { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - if w0.AuxInt != j-32 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVWBRstoreidx [i-4] {s} idx p w0:(SRDconst [j-32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDBRstoreidx [i-4] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWBRstoreidx { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - if w0.AuxInt != j-32 { - break - } - if w != w0.Args[0] { - break - } - mem := 
x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWBRstoreidx [i] {s} idx p (SRDconst [j] w) x:(MOVWBRstoreidx [i-4] {s} p idx w0:(SRDconst [j-32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDBRstoreidx [i-4] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWBRstoreidx { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - if w0.AuxInt != j-32 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWBRstoreidx [i] {s} idx p (SRDconst [j] w) x:(MOVWBRstoreidx [i-4] {s} idx p w0:(SRDconst [j-32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDBRstoreidx [i-4] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - v_2 := v.Args[2] - if v_2.Op != OpS390XSRDconst { - break - } - j := v_2.AuxInt - w := v_2.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWBRstoreidx { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - w0 := x.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - if w0.AuxInt != j-32 { - break - } - if w != w0.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDBRstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool { - // match: (MOVWZload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) - // result: (MOVWZreg x) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVWstore { - break - } - off2 := v_1.AuxInt - sym2 := v_1.Aux - ptr2 := v_1.Args[0] - x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { - break - } - v.reset(OpS390XMOVWZreg) - v.AddArg(x) - return true - } - // match: (MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is20Bit(off1+off2) - // result: (MOVWZload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is20Bit(off1 + off2)) { - break - } - v.reset(OpS390XMOVWZload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - 
break - } - v.reset(OpS390XMOVWZload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) - return true - } - // match: (MOVWZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddridx { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVWZloadidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWZload [off] {sym} (ADD ptr idx) mem) - // cond: ptr.Op != OpSB - // result: (MOVWZloadidx [off] {sym} ptr idx mem) - for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - mem := v.Args[1] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVWZloadidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVWZloadidx(v *Value) bool { - // match: (MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: - // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - mem := v.Args[2] - v.reset(OpS390XMOVWZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWZloadidx [c] {sym} idx (ADDconst [d] ptr) mem) - // cond: - // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - mem := v.Args[2] - v.reset(OpS390XMOVWZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: - // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - mem := v.Args[2] - v.reset(OpS390XMOVWZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - // match: (MOVWZloadidx [c] {sym} (ADDconst [d] idx) ptr mem) - // cond: - // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - mem := v.Args[2] - v.reset(OpS390XMOVWZloadidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { - b := v.Block - _ = b - // match: (MOVWZreg x:(MOVBZload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVBZload { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWZreg x:(MOVHZload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVHZload { - break - } - v.reset(OpS390XMOVDreg) - 
v.AddArg(x) - return true - } - // match: (MOVWZreg x:(MOVWZload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVWZload { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWZreg x:(Arg )) - // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpArg { - break - } - t := x.Type - if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t)) { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWZreg x:(MOVBZreg _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVBZreg { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWZreg x:(MOVHZreg _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVHZreg { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWZreg x:(MOVWZreg _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVWZreg { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWZreg (MOVDconst [c])) - // cond: - // result: (MOVDconst [int64(uint32(c))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = int64(uint32(c)) - return true - } - // match: (MOVWZreg x:(MOVWZload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWZload [off] {sym} ptr mem) - for { - x := v.Args[0] - if x.Op != OpS390XMOVWZload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - // match: (MOVWZreg x:(MOVWZloadidx [off] {sym} ptr idx mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWZloadidx [off] {sym} ptr idx mem) - for { - x := v.Args[0] - if x.Op != OpS390XMOVWZloadidx { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - idx := x.Args[1] - mem := x.Args[2] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVWload(v *Value) bool { - // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: is20Bit(off1+off2) - // result: (MOVWload [off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(is20Bit(off1 + off2)) { - break - } - v.reset(OpS390XMOVWload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - mem := v.Args[1] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - 
v.reset(OpS390XMOVWload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { - b := v.Block - _ = b - // match: (MOVWreg x:(MOVBload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVBload { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVBZload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVBZload { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVHload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVHload { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVHZload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVHZload { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVWload _ _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVWload { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(Arg )) - // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t) - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpArg { - break - } - t := x.Type - if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t)) { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVBreg _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVBreg { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVBZreg _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVBZreg { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVHreg _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVHreg { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVHreg _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVHreg { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg x:(MOVWreg _)) - // cond: - // result: (MOVDreg x) - for { - x := v.Args[0] - if x.Op != OpS390XMOVWreg { - break - } - v.reset(OpS390XMOVDreg) - v.AddArg(x) - return true - } - // match: (MOVWreg (MOVDconst [c])) - // cond: - // result: (MOVDconst [int64(int32(c))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = int64(int32(c)) - return true - } - // match: (MOVWreg x:(MOVWZload [off] {sym} ptr mem)) - // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVWload [off] {sym} ptr mem) - for { - x := v.Args[0] - if x.Op != OpS390XMOVWZload { - break - } - off := x.AuxInt - sym := x.Aux - ptr := x.Args[0] - mem := x.Args[1] - if !(x.Uses == 1 && clobber(x)) { - break - } - b = x.Block - v0 := b.NewValue0(v.Pos, OpS390XMOVWload, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = off - v0.Aux = sym - v0.AddArg(ptr) - v0.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { - // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) - // cond: - // result: (MOVWstore [off] {sym} ptr x mem) - for { - off := 
v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVWreg { - break - } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpS390XMOVWstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off] {sym} ptr (MOVWZreg x) mem) - // cond: - // result: (MOVWstore [off] {sym} ptr x mem) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVWZreg { - break - } - x := v_1.Args[0] - mem := v.Args[2] - v.reset(OpS390XMOVWstore) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(x) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) - // cond: is20Bit(off1+off2) - // result: (MOVWstore [off1+off2] {sym} ptr val mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is20Bit(off1 + off2)) { - break - } - v.reset(OpS390XMOVWstore) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) - // cond: validOff(off) && int64(int16(c)) == c && ptr.Op != OpSB - // result: (MOVWstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) - for { - off := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - c := v_1.AuxInt - mem := v.Args[2] - if !(validOff(off) && int64(int16(c)) == c && ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVWstoreconst) - v.AuxInt = makeValAndOff(int64(int32(c)), off) - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - base := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVWstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(base) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) - // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) - // result: (MOVWstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) - for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddridx { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off] {sym} (ADD ptr idx) val mem) - // cond: ptr.Op != OpSB - // result: (MOVWstoreidx [off] {sym} ptr idx val mem) - for { - off := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADD { - break - } - ptr := v_0.Args[0] - idx := v_0.Args[1] - val := v.Args[1] - mem := v.Args[2] - if !(ptr.Op != OpSB) { - break - } - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = off - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) 
- v.AddArg(mem) - return true - } - // match: (MOVWstore [i] {s} p (SRDconst [32] w) x:(MOVWstore [i-4] {s} p w mem)) - // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) - // result: (MOVDstore [i-4] {s} p w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSRDconst { - break - } - if v_1.AuxInt != 32 { - break - } - w := v_1.Args[0] - x := v.Args[2] - if x.Op != OpS390XMOVWstore { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if w != x.Args[1] { - break - } - mem := x.Args[2] - if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDstore) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstore [i] {s} p w0:(SRDconst [j] w) x:(MOVWstore [i-4] {s} p (SRDconst [j+32] w) mem)) - // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) - // result: (MOVDstore [i-4] {s} p w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - w0 := v.Args[1] - if w0.Op != OpS390XSRDconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[2] - if x.Op != OpS390XMOVWstore { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - x_1 := x.Args[1] - if x_1.Op != OpS390XSRDconst { - break - } - if x_1.AuxInt != j+32 { - break - } - if w != x_1.Args[0] { - break - } - mem := x.Args[2] - if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDstore) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem)) - // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(i-4) && clobber(x) - // result: (STM2 [i-4] {s} p w0 w1 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - w1 := v.Args[1] - x := v.Args[2] - if x.Op != OpS390XMOVWstore { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - w0 := x.Args[1] - mem := x.Args[2] - if !(p.Op != OpSB && x.Uses == 1 && is20Bit(i-4) && clobber(x)) { - break - } - v.reset(OpS390XSTM2) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(mem) - return true - } - // match: (MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem)) - // cond: x.Uses == 1 && is20Bit(i-8) && clobber(x) - // result: (STM3 [i-8] {s} p w0 w1 w2 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - w2 := v.Args[1] - x := v.Args[2] - if x.Op != OpS390XSTM2 { - break - } - if x.AuxInt != i-8 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - w0 := x.Args[1] - w1 := x.Args[2] - mem := x.Args[3] - if !(x.Uses == 1 && is20Bit(i-8) && clobber(x)) { - break - } - v.reset(OpS390XSTM3) - v.AuxInt = i - 8 - v.Aux = s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(w2) - v.AddArg(mem) - return true - } - // match: (MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem)) - // cond: x.Uses == 1 && is20Bit(i-12) && clobber(x) - // result: (STM4 [i-12] {s} p w0 w1 w2 w3 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - w3 := v.Args[1] - x := v.Args[2] - if x.Op != OpS390XSTM3 { - break - } - if x.AuxInt != i-12 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - w0 := x.Args[1] - w1 := x.Args[2] - w2 := x.Args[3] - mem := x.Args[4] - if !(x.Uses == 1 && is20Bit(i-12) && clobber(x)) { - break - } - v.reset(OpS390XSTM4) - v.AuxInt = i - 12 - v.Aux = 
s - v.AddArg(p) - v.AddArg(w0) - v.AddArg(w1) - v.AddArg(w2) - v.AddArg(w3) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool { - b := v.Block - _ = b - types := &b.Func.Config.Types - _ = types - // match: (MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) - // cond: ValAndOff(sc).canAdd(off) - // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) - for { - sc := v.AuxInt - s := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - off := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - if !(ValAndOff(sc).canAdd(off)) { - break - } - v.reset(OpS390XMOVWstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = s - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) - // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) - // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) - for { - sc := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDaddr { - break - } - off := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { - break - } - v.reset(OpS390XMOVWstoreconst) - v.AuxInt = ValAndOff(sc).add(off) - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) - // cond: p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) - // result: (MOVDstore [ValAndOff(a).Off()] {s} p (MOVDconst [ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32]) mem) - for { - c := v.AuxInt - s := v.Aux - p := v.Args[0] - x := v.Args[1] - if x.Op != OpS390XMOVWstoreconst { - break - } - a := x.AuxInt - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - mem := x.Args[1] - if !(p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { - break - } - v.reset(OpS390XMOVDstore) - v.AuxInt = ValAndOff(a).Off() - v.Aux = s - v.AddArg(p) - v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64) - v0.AuxInt = ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32 - v.AddArg(v0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool { - // match: (MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: - // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - ptr := v_0.Args[0] - idx := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx [c] {sym} idx (ADDconst [d] ptr) val mem) - // cond: - // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - idx := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XADDconst { - break - } - d := v_1.AuxInt - ptr := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: - // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - ptr := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != 
OpS390XADDconst { - break - } - d := v_1.AuxInt - idx := v_1.Args[0] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx [c] {sym} (ADDconst [d] idx) ptr val mem) - // cond: - // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) - for { - c := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - d := v_0.AuxInt - idx := v_0.Args[0] - ptr := v.Args[1] - val := v.Args[2] - mem := v.Args[3] - v.reset(OpS390XMOVWstoreidx) - v.AuxInt = c + d - v.Aux = sym - v.AddArg(ptr) - v.AddArg(idx) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx [i] {s} p idx w x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDstoreidx [i-4] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVWstoreidx { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { - break - } - if x_2.AuxInt != 32 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx [i] {s} p idx w x:(MOVWstoreidx [i-4] {s} idx p (SRDconst [32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDstoreidx [i-4] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVWstoreidx { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { - break - } - if x_2.AuxInt != 32 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx [i] {s} idx p w x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDstoreidx [i-4] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVWstoreidx { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { - break - } - if x_2.AuxInt != 32 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx [i] {s} idx p w x:(MOVWstoreidx [i-4] {s} idx p (SRDconst [32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDstoreidx [i-4] {s} p idx w mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w := v.Args[2] - x := v.Args[3] - if x.Op != OpS390XMOVWstoreidx { - break - } - if x.AuxInt != i-4 { - break 
- } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { - break - } - if x_2.AuxInt != 32 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [j+32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDstoreidx [i-4] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWstoreidx { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { - break - } - if x_2.AuxInt != j+32 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} idx p (SRDconst [j+32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDstoreidx [i-4] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - p := v.Args[0] - idx := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWstoreidx { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { - break - } - if x_2.AuxInt != j+32 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx [i] {s} idx p w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [j+32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDstoreidx [i-4] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - j := w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWstoreidx { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if p != x.Args[0] { - break - } - if idx != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { - break - } - if x_2.AuxInt != j+32 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - // match: (MOVWstoreidx [i] {s} idx p w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} idx p (SRDconst [j+32] w) mem)) - // cond: x.Uses == 1 && clobber(x) - // result: (MOVDstoreidx [i-4] {s} p idx w0 mem) - for { - i := v.AuxInt - s := v.Aux - idx := v.Args[0] - p := v.Args[1] - w0 := v.Args[2] - if w0.Op != OpS390XSRDconst { - break - } - j 
:= w0.AuxInt - w := w0.Args[0] - x := v.Args[3] - if x.Op != OpS390XMOVWstoreidx { - break - } - if x.AuxInt != i-4 { - break - } - if x.Aux != s { - break - } - if idx != x.Args[0] { - break - } - if p != x.Args[1] { - break - } - x_2 := x.Args[2] - if x_2.Op != OpS390XSRDconst { - break - } - if x_2.AuxInt != j+32 { - break - } - if w != x_2.Args[0] { - break - } - mem := x.Args[3] - if !(x.Uses == 1 && clobber(x)) { - break - } - v.reset(OpS390XMOVDstoreidx) - v.AuxInt = i - 4 - v.Aux = s - v.AddArg(p) - v.AddArg(idx) - v.AddArg(w0) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMULLD(v *Value) bool { - // match: (MULLD x (MOVDconst [c])) - // cond: is32Bit(c) - // result: (MULLDconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - c := v_1.AuxInt - if !(is32Bit(c)) { - break - } - v.reset(OpS390XMULLDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (MULLD (MOVDconst [c]) x) - // cond: is32Bit(c) - // result: (MULLDconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(is32Bit(c)) { - break - } - v.reset(OpS390XMULLDconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (MULLD x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (MULLDload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XMULLDload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULLD g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (MULLDload [off] {sym} x ptr mem) - for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XMULLDload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULLD g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (MULLDload [off] {sym} x ptr mem) - for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XMULLDload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULLD x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (MULLDload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XMULLDload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - 
v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMULLDconst(v *Value) bool { - b := v.Block - _ = b - // match: (MULLDconst [-1] x) - // cond: - // result: (NEG x) - for { - if v.AuxInt != -1 { - break - } - x := v.Args[0] - v.reset(OpS390XNEG) - v.AddArg(x) - return true - } - // match: (MULLDconst [0] _) - // cond: - // result: (MOVDconst [0]) - for { - if v.AuxInt != 0 { - break - } - v.reset(OpS390XMOVDconst) - v.AuxInt = 0 - return true - } - // match: (MULLDconst [1] x) - // cond: - // result: x - for { - if v.AuxInt != 1 { - break - } - x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MULLDconst [c] x) - // cond: isPowerOfTwo(c) - // result: (SLDconst [log2(c)] x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpS390XSLDconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true - } - // match: (MULLDconst [c] x) - // cond: isPowerOfTwo(c+1) && c >= 15 - // result: (SUB (SLDconst [log2(c+1)] x) x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c+1) && c >= 15) { - break - } - v.reset(OpS390XSUB) - v0 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULLDconst [c] x) - // cond: isPowerOfTwo(c-1) && c >= 17 - // result: (ADD (SLDconst [log2(c-1)] x) x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c-1) && c >= 17) { - break - } - v.reset(OpS390XADD) - v0 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULLDconst [c] (MOVDconst [d])) - // cond: - // result: (MOVDconst [c*d]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - d := v_0.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = c * d - return true - } - return false -} -func rewriteValueS390X_OpS390XMULLW(v *Value) bool { - // match: (MULLW x (MOVDconst [c])) - // cond: - // result: (MULLWconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpS390XMULLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (MULLW (MOVDconst [c]) x) - // cond: - // result: (MULLWconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpS390XMULLWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (MULLW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (MULLWload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XMULLWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULLW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (MULLWload [off] {sym} x ptr mem) - for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVWload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - 
break - } - v.reset(OpS390XMULLWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULLW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (MULLWload [off] {sym} x ptr mem) - for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVWload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XMULLWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULLW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (MULLWload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XMULLWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULLW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (MULLWload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XMULLWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULLW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (MULLWload [off] {sym} x ptr mem) - for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XMULLWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULLW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (MULLWload [off] {sym} x ptr mem) - for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XMULLWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MULLW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (MULLWload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XMULLWload) - v.Type = t - 
v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - return false -} -func rewriteValueS390X_OpS390XMULLWconst(v *Value) bool { - b := v.Block - _ = b - // match: (MULLWconst [-1] x) - // cond: - // result: (NEGW x) - for { - if v.AuxInt != -1 { - break - } - x := v.Args[0] - v.reset(OpS390XNEGW) - v.AddArg(x) - return true - } - // match: (MULLWconst [0] _) - // cond: - // result: (MOVDconst [0]) - for { - if v.AuxInt != 0 { - break - } - v.reset(OpS390XMOVDconst) - v.AuxInt = 0 - return true - } - // match: (MULLWconst [1] x) - // cond: - // result: x - for { - if v.AuxInt != 1 { - break - } - x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (MULLWconst [c] x) - // cond: isPowerOfTwo(c) - // result: (SLWconst [log2(c)] x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpS390XSLWconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true - } - // match: (MULLWconst [c] x) - // cond: isPowerOfTwo(c+1) && c >= 15 - // result: (SUBW (SLWconst [log2(c+1)] x) x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c+1) && c >= 15) { - break - } - v.reset(OpS390XSUBW) - v0 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v0.AuxInt = log2(c + 1) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULLWconst [c] x) - // cond: isPowerOfTwo(c-1) && c >= 17 - // result: (ADDW (SLWconst [log2(c-1)] x) x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c-1) && c >= 17) { - break - } - v.reset(OpS390XADDW) - v0 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v0.AuxInt = log2(c - 1) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (MULLWconst [c] (MOVDconst [d])) - // cond: - // result: (MOVDconst [int64(int32(c*d))]) - for { - c := v.AuxInt - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - d := v_0.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = int64(int32(c * d)) - return true - } - return false -} -func rewriteValueS390X_OpS390XNEG(v *Value) bool { - // match: (NEG (MOVDconst [c])) - // cond: - // result: (MOVDconst [-c]) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = -c - return true - } - // match: (NEG (ADDconst [c] (NEG x))) - // cond: c != -(1<<31) - // result: (ADDconst [-c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XADDconst { - break - } - c := v_0.AuxInt - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpS390XNEG { - break - } - x := v_0_0.Args[0] - if !(c != -(1 << 31)) { - break - } - v.reset(OpS390XADDconst) - v.AuxInt = -c - v.AddArg(x) - return true - } - return false -} -func rewriteValueS390X_OpS390XNEGW(v *Value) bool { - // match: (NEGW (MOVDconst [c])) - // cond: - // result: (MOVDconst [int64(int32(-c))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = int64(int32(-c)) - return true - } - return false -} -func rewriteValueS390X_OpS390XNOT(v *Value) bool { - b := v.Block - _ = b - types := &b.Func.Config.Types - _ = types - // match: (NOT x) - // cond: true - // result: (XOR (MOVDconst [-1]) x) - for { - x := v.Args[0] - if !(true) { - break - } - v.reset(OpS390XXOR) - v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64) - v0.AuxInt = -1 - v.AddArg(v0) - v.AddArg(x) - return true - } - return false -} -func rewriteValueS390X_OpS390XNOTW(v *Value) bool { - // match: (NOTW x) - // cond: true - // result: 
(XORWconst [-1] x) - for { - x := v.Args[0] - if !(true) { - break - } - v.reset(OpS390XXORWconst) - v.AuxInt = -1 - v.AddArg(x) - return true - } - return false -} -func rewriteValueS390X_OpS390XOR(v *Value) bool { - b := v.Block - _ = b - types := &b.Func.Config.Types - _ = types - // match: (OR x (MOVDconst [c])) - // cond: isU32Bit(c) - // result: (ORconst [c] x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - c := v_1.AuxInt - if !(isU32Bit(c)) { - break - } - v.reset(OpS390XORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (OR (MOVDconst [c]) x) - // cond: isU32Bit(c) - // result: (ORconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - x := v.Args[1] - if !(isU32Bit(c)) { - break - } - v.reset(OpS390XORconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (OR (SLDconst x [c]) (SRDconst x [d])) - // cond: d == 64-c - // result: (RLLGconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XSLDconst { - break - } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSRDconst { - break - } - d := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(d == 64-c) { - break - } - v.reset(OpS390XRLLGconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (OR (SRDconst x [d]) (SLDconst x [c])) - // cond: d == 64-c - // result: (RLLGconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XSRDconst { - break - } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSLDconst { - break - } - c := v_1.AuxInt - if x != v_1.Args[0] { - break - } - if !(d == 64-c) { - break - } - v.reset(OpS390XRLLGconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (OR (MOVDconst [c]) (MOVDconst [d])) - // cond: - // result: (MOVDconst [c|d]) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - d := v_1.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = c | d - return true - } - // match: (OR (MOVDconst [d]) (MOVDconst [c])) - // cond: - // result: (MOVDconst [c|d]) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = c | d - return true - } - // match: (OR x x) - // cond: - // result: x - for { - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (OR x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ORload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XORload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (OR g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ORload [off] {sym} x ptr mem) - for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && 
canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XORload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (OR g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ORload [off] {sym} x ptr mem) - for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XORload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (OR x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ORload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XORload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (OR x1:(MOVBZload [i1] {s} p mem) sh:(SLDconst [8] x0:(MOVBZload [i0] {s} p mem))) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem) - for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [8] x0:(MOVBZload [i0] {s} p mem)) x1:(MOVBZload [i1] {s} p mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } 
- // match: (OR x1:(MOVHZload [i1] {s} p mem) sh:(SLDconst [16] x0:(MOVHZload [i0] {s} p mem))) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem) - for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [16] x0:(MOVHZload [i0] {s} p mem)) x1:(MOVHZload [i1] {s} p mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVWZload [i1] {s} p mem) sh:(SLDconst [32] x0:(MOVWZload [i0] {s} p mem))) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDload [i0] {s} p mem) - for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVWZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDload, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [32] x0:(MOVWZload [i0] {s} p mem)) x1:(MOVWZload [i1] {s} p mem)) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil 
&& clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDload [i0] {s} p mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVWZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDload, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem)) or:(OR s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZload [i0] {s} p mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZload [i0] {s} p mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+1 && j1 == 
j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem)) y) s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZload [i0] {s} p mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem))) s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZload [i0] {s} p mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - 
v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVHZload [i0] {s} p mem)) or:(OR s1:(SLDconst [j1] x1:(MOVHZload [i1] {s} p mem)) y)) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZload [i0] {s} p mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVHZload [i0] {s} p mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVHZload [i1] {s} p mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZload [i0] {s} p mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s1:(SLDconst [j1] 
x1:(MOVHZload [i1] {s} p mem)) y) s0:(SLDconst [j0] x0:(MOVHZload [i0] {s} p mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZload [i0] {s} p mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVHZload [i1] {s} p mem))) s0:(SLDconst [j0] x0:(MOVHZload [i0] {s} p mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZload [i0] {s} p mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR x1:(MOVBZloadidx [i1] {s} p idx mem) sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - x1 := 
v.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVBZloadidx [i1] {s} idx p mem) sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVBZloadidx [i1] {s} p idx mem) sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVBZloadidx [i1] {s} idx p mem) sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && 
clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem)) x1:(MOVBZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} idx p mem)) x1:(MOVBZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem)) x1:(MOVBZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 
1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} idx p mem)) x1:(MOVBZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVHZloadidx [i1] {s} p idx mem) sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVHZloadidx [i1] {s} idx p mem) 
sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVHZloadidx [i1] {s} p idx mem) sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVHZloadidx [i1] {s} idx p mem) sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - 
v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem)) x1:(MOVHZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} idx p mem)) x1:(MOVHZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem)) x1:(MOVHZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, 
OpS390XMOVWZloadidx, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} idx p mem)) x1:(MOVHZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVWZloadidx [i1] {s} p idx mem) sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) - for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVWZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVWZloadidx [i1] {s} idx p mem) sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) - for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVWZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) 
&& clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVWZloadidx [i1] {s} p idx mem) sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) - for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVWZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR x1:(MOVWZloadidx [i1] {s} idx p mem) sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) - for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVWZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} p idx mem)) x1:(MOVWZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVWZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+4 && p.Op != 
OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} idx p mem)) x1:(MOVWZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVWZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} p idx mem)) x1:(MOVWZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVWZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} idx p mem)) x1:(MOVWZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVWZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVWZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if 
p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 
:= b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, 
OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - 
v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - 
v0.AddArg(y) - return true - } - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) y) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx 
mem))) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 
== 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) y) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != 
nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem)) or:(OR s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem)) y)) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: 
@mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem)) or:(OR s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem)) y)) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem)) or:(OR s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem)) y)) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - s0 := v.Args[0] - 
if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem)) or:(OR s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem)) y)) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != 
OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - 
mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - s0 := v.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem)) y) s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := 
v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem)) y) s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem))) s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != 
OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem))) s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem)) y) s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] 
{ - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem)) y) s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem))) s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && 
j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem))) s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR x0:(MOVBZload [i0] {s} p mem) sh:(SLDconst [8] x1:(MOVBZload [i1] {s} p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem)) - for { - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [8] x1:(MOVBZload [i1] 
{s} p mem)) x0:(MOVBZload [i0] {s} p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem)) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem)) sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRload [i0] {s} p mem)) - for { - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRload, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))) r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRload [i0] {s} p mem)) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+2 && x0.Uses 
== 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRload, types.UInt32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR r0:(MOVWZreg x0:(MOVWBRload [i0] {s} p mem)) sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRload [i1] {s} p mem)))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDBRload [i0] {s} p mem) - for { - r0 := v.Args[0] - if r0.Op != OpS390XMOVWZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRload, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRload [i1] {s} p mem))) r0:(MOVWZreg x0:(MOVWBRload [i0] {s} p mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDBRload [i0] {s} p mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - r0 := v.Args[1] - if r0.Op != OpS390XMOVWZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRload, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (OR s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem)) or:(OR s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && 
clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) - for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem)) or:(OR y s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) - for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem)) y) s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) - for { - or := v.Args[0] - if or.Op 
!= OpS390XOR { - break - } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem))) s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))) or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))) y)) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRload [i0] {s} p mem))) y) - for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt 
- r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRload, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))) or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRload [i0] {s} p mem))) y) - for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { - break - } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRload, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))) y) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && 
x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRload [i0] {s} p mem))) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRload, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem)))) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRload [i0] {s} p mem))) y) - for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, 
types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRload, types.UInt32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR x0:(MOVBZloadidx [i0] {s} p idx mem) sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR x0:(MOVBZloadidx [i0] {s} idx p mem) sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR x0:(MOVBZloadidx [i0] {s} p idx mem) sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != 
x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR x0:(MOVBZloadidx [i0] {s} idx p mem) sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem)) x0:(MOVBZloadidx [i0] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} idx p mem)) x0:(MOVBZloadidx [i0] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := 
x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem)) x0:(MOVBZloadidx [i0] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} idx p mem)) x0:(MOVBZloadidx [i0] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 8 { - break - } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)) sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil 
&& clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) - for { - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem)) sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) - for { - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)) sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) - for { - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } 
- if sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem)) sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) - for { - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, 
x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR sh:(SLDconst [16] r1:(MOVHZreg 
x1:(MOVHBRloadidx [i1] {s} idx p mem))) r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (OR r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} p idx mem)) sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) - for { - r0 := v.Args[0] - if r0.Op != OpS390XMOVWZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, types.Int64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} idx p mem)) sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) - for { - r0 := v.Args[0] - if r0.Op != OpS390XMOVWZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRloadidx { - break - } - i0 := x0.AuxInt - s 
:= x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, types.Int64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} p idx mem)) sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) - for { - r0 := v.Args[0] - if r0.Op != OpS390XMOVWZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, types.Int64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} idx p mem)) sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) - for { - r0 := v.Args[0] - if r0.Op != OpS390XMOVWZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) 
&& clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, types.Int64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} p idx mem))) r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - r0 := v.Args[1] - if r0.Op != OpS390XMOVWZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, types.Int64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} idx p mem))) r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - r0 := v.Args[1] - if r0.Op != OpS390XMOVWZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, types.Int64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} p idx mem))) r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && 
clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - r0 := v.Args[1] - if r0.Op != OpS390XMOVWZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, types.Int64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} idx p mem))) r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLDconst { - break - } - if sh.AuxInt != 32 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVWZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVWBRloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - r0 := v.Args[1] - if r0.Op != OpS390XMOVWZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVWBRloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, types.Int64) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - 
if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if 
x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s0 := or.Args[1] - if 
s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if idx != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != 
OpS390XOR { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if idx != x0.Args[0] { - break - } - if p != x0.Args[1] { - break - } - if mem != x0.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + cmp := v_2.Args[0] + v.reset(OpS390XMOVDEQ) + v.AddArg(x) + v.AddArg(y) + v.AddArg(cmp) return true } - // match: (OR or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVDEQ _ x (FlagEQ)) + // cond: + // result: x for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - s0 := 
or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagEQ { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.Type = x.Type + v.AddArg(x) return true } - // match: (OR or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) y) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVDEQ y _ (FlagLT)) + // cond: + // result: y for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + y := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagLT { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.Type = y.Type + v.AddArg(y) return true } - // match: (OR or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 
1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVDEQ y _ (FlagGT)) + // cond: + // result: y for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + y := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagGT { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.Type = y.Type + v.AddArg(y) return true } - // match: (OR or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + return false +} +func rewriteValueS390X_OpS390XMOVDGE(v *Value) bool { + // match: (MOVDGE x y (InvertFlags cmp)) + // cond: + // result: (MOVDLE x y cmp) for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XInvertFlags { break } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + cmp := v_2.Args[0] + v.reset(OpS390XMOVDLE) + v.AddArg(x) + v.AddArg(y) + v.AddArg(cmp) + return true + } + // match: (MOVDGE _ x (FlagEQ)) + // cond: + // result: x + for { + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagEQ { break } - b = mergePoint(b, 
x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.Type = x.Type + v.AddArg(x) return true } - // match: (OR or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVDGE y _ (FlagLT)) + // cond: + // result: y for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + y := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagLT { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.Type = y.Type + v.AddArg(y) return true } - // match: (OR or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) y) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVDGE _ x (FlagGT)) + // cond: + // result: x for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { - break - } - if mem != 
x1.Args[2] { - break - } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagGT { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.Type = x.Type + v.AddArg(x) return true } - // match: (OR or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + return false +} +func rewriteValueS390X_OpS390XMOVDGT(v *Value) bool { + // match: (MOVDGT x y (InvertFlags cmp)) + // cond: + // result: (MOVDLT x y cmp) for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { - break - } - if p != x1.Args[1] { + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XInvertFlags { break } - if mem != x1.Args[2] { + cmp := v_2.Args[0] + v.reset(OpS390XMOVDLT) + v.AddArg(x) + v.AddArg(y) + v.AddArg(cmp) + return true + } + // match: (MOVDGT y _ (FlagEQ)) + // cond: + // result: y + for { + y := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagEQ { break } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (MOVDGT y _ (FlagLT)) + // cond: + // result: y + for { + y := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagLT { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.Type = y.Type + v.AddArg(y) return true } - // match: (OR or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 
&& s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVDGT _ x (FlagGT)) + // cond: + // result: x for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagGT { break } - if idx != x1.Args[0] { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDLE(v *Value) bool { + // match: (MOVDLE x y (InvertFlags cmp)) + // cond: + // result: (MOVDGE x y cmp) + for { + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XInvertFlags { break } - if p != x1.Args[1] { + cmp := v_2.Args[0] + v.reset(OpS390XMOVDGE) + v.AddArg(x) + v.AddArg(y) + v.AddArg(cmp) + return true + } + // match: (MOVDLE _ x (FlagEQ)) + // cond: + // result: x + for { + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagEQ { break } - if mem != x1.Args[2] { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVDLE _ x (FlagLT)) + // cond: + // result: x + for { + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagLT { break } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVDLE y _ (FlagGT)) + // cond: + // result: y + for { + y := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagGT { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.Type = y.Type + v.AddArg(y) return true } - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))) y)) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) + return false +} +func rewriteValueS390X_OpS390XMOVDLT(v *Value) bool { + // match: (MOVDLT x y (InvertFlags cmp)) + // cond: + // result: (MOVDGT x y cmp) for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != 
OpS390XInvertFlags { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { + cmp := v_2.Args[0] + v.reset(OpS390XMOVDGT) + v.AddArg(x) + v.AddArg(y) + v.AddArg(cmp) + return true + } + // match: (MOVDLT y _ (FlagEQ)) + // cond: + // result: y + for { + y := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagEQ { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (MOVDLT _ x (FlagLT)) + // cond: + // result: x + for { + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagLT { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVDLT y _ (FlagGT)) + // cond: + // result: y + for { + y := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagGT { break } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDNE(v *Value) bool { + // match: (MOVDNE x y (InvertFlags cmp)) + // cond: + // result: (MOVDNE x y cmp) + for { + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XInvertFlags { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { + cmp := v_2.Args[0] + v.reset(OpS390XMOVDNE) + v.AddArg(x) + v.AddArg(y) + v.AddArg(cmp) + return true + } + // match: (MOVDNE y _ (FlagEQ)) + // cond: + // result: y + for { + y := v.Args[0] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagEQ { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + v.reset(OpCopy) + v.Type = y.Type + v.AddArg(y) + return true + } + // match: (MOVDNE _ x (FlagLT)) + // cond: + // result: x + for { + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagLT { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVDNE _ x (FlagGT)) + // cond: + // result: x + for { + x := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFlagGT { break } - if p != x0.Args[0] { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool { + // match: (MOVDaddridx [c] {s} (ADDconst [d] x) y) + // cond: is20Bit(c+d) && x.Op != OpSB + // result: (MOVDaddridx [c+d] {s} x y) + for { + c := v.AuxInt + s := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - if idx != x0.Args[1] { + d := v_0.AuxInt + x := v_0.Args[0] + y := v.Args[1] + if !(is20Bit(c+d) && x.Op != OpSB) { break } - if mem != x0.Args[2] { + v.reset(OpS390XMOVDaddridx) + v.AuxInt = c + d + v.Aux = s + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (MOVDaddridx [c] {s} x (ADDconst [d] y)) + // cond: is20Bit(c+d) && y.Op != OpSB + // result: (MOVDaddridx [c+d] {s} x y) + for { + c := v.AuxInt + s := v.Aux + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { break } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + d := v_1.AuxInt + y := v_1.Args[0] + if !(is20Bit(c+d) && y.Op != OpSB) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, 
v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVDaddridx) + v.AuxInt = c + d + v.Aux = s + v.AddArg(x) + v.AddArg(y) return true } - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))) y)) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB + // result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y) for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { - break - } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { + off2 := v_0.AuxInt + sym2 := v_0.Aux + x := v_0.Args[0] + y := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB) { break } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { + v.reset(OpS390XMOVDaddridx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB + // result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y) + for { + off1 := v.AuxInt + sym1 := v.Aux + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDaddr { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { + off2 := v_1.AuxInt + sym2 := v_1.Aux + y := v_1.Args[0] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2) && y.Op != OpSB) { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + v.reset(OpS390XMOVDaddridx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDload(v *Value) bool { + // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVDreg x) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDstore { break } - i0 := x0.AuxInt - if x0.Aux != s { + off2 := v_1.AuxInt + sym2 := v_1.Aux + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - if p != x0.Args[0] { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is20Bit(off1+off2) + // result: (MOVDload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := 
v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - if idx != x0.Args[1] { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is20Bit(off1 + off2)) { break } - if mem != x0.Args[2] { + v.reset(OpS390XMOVDload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { break } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVDload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) return true } - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))) y)) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddridx { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + v.reset(OpS390XMOVDloadidx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVDload [off] {sym} (ADD ptr idx) mem) + // cond: ptr.Op != OpSB + // result: (MOVDloadidx [off] {sym} ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADD { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(ptr.Op != OpSB) { break } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { + 
v.reset(OpS390XMOVDloadidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDloadidx(v *Value) bool { + // match: (MOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) + // cond: + // result: (MOVDloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpS390XMOVDloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) + // cond: + // result: (MOVDloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpS390XMOVDloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDnop(v *Value) bool { + b := v.Block + _ = b + // match: (MOVDnop x) + // cond: t.Compare(x.Type) == CMPeq + // result: x + for { + t := v.Type + x := v.Args[0] + if !(t.Compare(x.Type) == CMPeq) { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVDnop (MOVDconst [c])) + // cond: + // result: (MOVDconst [c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { break } - if idx != x0.Args[0] { + c := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = c + return true + } + // match: (MOVDnop x:(MOVBZload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBZload [off] {sym} ptr mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVBZload { break } - if p != x0.Args[1] { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - if mem != x0.Args[2] { + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVDnop x:(MOVBload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBload [off] {sym} ptr mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVBload { break } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVBload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = off + v0.Aux = sym 
+ v0.AddArg(ptr) + v0.AddArg(mem) return true } - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))) y)) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVDnop x:(MOVHZload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHZload [off] {sym} ptr mem) for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVHZload { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVDnop x:(MOVHload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHload [off] {sym} ptr mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVHload { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVHload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVDnop x:(MOVWZload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWZload [off] {sym} ptr mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVWZload { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVDnop x:(MOVWload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWload [off] {sym} ptr mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVWload { break } - i0 := x0.AuxInt - if x0.Aux != s { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - if idx != x0.Args[0] { + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVWload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVDnop x:(MOVDload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVDload [off] {sym} ptr mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVDload { break } - if p != x0.Args[1] { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := 
x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - if mem != x0.Args[2] { + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVDnop x:(MOVBZloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBZloadidx [off] {sym} ptr idx mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVBZloadidx { break } - y := or.Args[1] - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVBZloadidx, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) return true } - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVDnop x:(MOVHZloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHZloadidx [off] {sym} ptr idx mem) for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVHZloadidx { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + // match: (MOVDnop x:(MOVWZloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWZloadidx [off] {sym} ptr idx mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVWZloadidx { break } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { + b = x.Block + v0 := 
b.NewValue0(v.Pos, OpS390XMOVWZloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + // match: (MOVDnop x:(MOVDloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVDloadidx [off] {sym} ptr idx mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVDloadidx { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - i0 := x0.AuxInt - if x0.Aux != s { + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDreg(v *Value) bool { + b := v.Block + _ = b + // match: (MOVDreg x) + // cond: t.Compare(x.Type) == CMPeq + // result: x + for { + t := v.Type + x := v.Args[0] + if !(t.Compare(x.Type) == CMPeq) { break } - if p != x0.Args[0] { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVDreg (MOVDconst [c])) + // cond: + // result: (MOVDconst [c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { break } - if idx != x0.Args[1] { + c := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = c + return true + } + // match: (MOVDreg x) + // cond: x.Uses == 1 + // result: (MOVDnop x) + for { + x := v.Args[0] + if !(x.Uses == 1) { break } - if mem != x0.Args[2] { + v.reset(OpS390XMOVDnop) + v.AddArg(x) + return true + } + // match: (MOVDreg x:(MOVBZload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBZload [off] {sym} ptr mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVBZload { break } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) return true } - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVDreg x:(MOVBload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: 
@x.Block (MOVBload [off] {sym} ptr mem) for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVBload { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVBload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVDreg x:(MOVHZload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHZload [off] {sym} ptr mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVHZload { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVDreg x:(MOVHload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHload [off] {sym} ptr mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVHload { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVHload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVDreg x:(MOVWZload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWZload [off] {sym} ptr mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVWZload { break } - i0 := x0.AuxInt - if x0.Aux != s { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - if p != x0.Args[0] { + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVDreg x:(MOVWload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWload [off] {sym} ptr mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVWload { break } - if idx != x0.Args[1] { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - if mem != x0.Args[2] { + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVWload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVDreg x:(MOVDload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVDload [off] {sym} ptr mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVDload { break } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && 
clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVDload, t) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) return true } - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVDreg x:(MOVBZloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBZloadidx [off] {sym} ptr idx mem) for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVBZloadidx { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVBZloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + // match: (MOVDreg x:(MOVHZloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHZloadidx [off] {sym} ptr idx mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVHZloadidx { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + // match: (MOVDreg x:(MOVWZloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWZloadidx [off] {sym} ptr idx mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVWZloadidx { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + 
return true + } + // match: (MOVDreg x:(MOVDloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVDloadidx [off] {sym} ptr idx mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVDloadidx { break } - i0 := x0.AuxInt - if x0.Aux != s { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - if idx != x0.Args[0] { + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool { + // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is20Bit(off1+off2) + // result: (MOVDstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - if p != x0.Args[1] { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is20Bit(off1 + off2)) { break } - if mem != x0.Args[2] { + v.reset(OpS390XMOVDstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) + // cond: validValAndOff(c,off) && int64(int16(c)) == c && ptr.Op != OpSB + // result: (MOVDstoreconst [makeValAndOff(c,off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { break } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + c := v_1.AuxInt + mem := v.Args[2] + if !(validValAndOff(c, off) && int64(int16(c)) == c && ptr.Op != OpSB) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVDstoreconst) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { - s1 := v.Args[0] - if s1.Op != OpS390XSLDconst { - break - } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := 
v.Args[0] + if v_0.Op != OpS390XMOVDaddr { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XOR { + v.reset(OpS390XMOVDstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVDstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddridx { break } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { + v.reset(OpS390XMOVDstoreidx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVDstore [off] {sym} (ADD ptr idx) val mem) + // cond: ptr.Op != OpSB + // result: (MOVDstoreidx [off] {sym} ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADD { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(ptr.Op != OpSB) { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(OpS390XMOVDstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem)) + // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(i-8) && clobber(x) + // result: (STMG2 [i-8] {s} p w0 w1 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w1 := v.Args[1] + x := v.Args[2] + if x.Op != OpS390XMOVDstore { break } - if idx != x0.Args[0] { + if x.AuxInt != i-8 { break } - if p != x0.Args[1] { + if x.Aux != s { break } - if mem != x0.Args[2] { + if p != x.Args[0] { break } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + w0 := x.Args[1] + mem := x.Args[2] + if !(p.Op != OpSB && x.Uses == 1 && is20Bit(i-8) && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XSTMG2) + v.AuxInt = i - 8 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(w1) + v.AddArg(mem) return true } - // match: (OR or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))) y) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] 
{s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem)) + // cond: x.Uses == 1 && is20Bit(i-16) && clobber(x) + // result: (STMG3 [i-16] {s} p w0 w1 w2 mem) for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w2 := v.Args[1] + x := v.Args[2] + if x.Op != OpS390XSTMG2 { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { + if x.AuxInt != i-16 { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + if x.Aux != s { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { + if p != x.Args[0] { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { + w0 := x.Args[1] + w1 := x.Args[2] + mem := x.Args[3] + if !(x.Uses == 1 && is20Bit(i-16) && clobber(x)) { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + v.reset(OpS390XSTMG3) + v.AuxInt = i - 16 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(w1) + v.AddArg(w2) + v.AddArg(mem) + return true + } + // match: (MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem)) + // cond: x.Uses == 1 && is20Bit(i-24) && clobber(x) + // result: (STMG4 [i-24] {s} p w0 w1 w2 w3 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w3 := v.Args[1] + x := v.Args[2] + if x.Op != OpS390XSTMG3 { break } - i1 := x1.AuxInt - if x1.Aux != s { + if x.AuxInt != i-24 { break } - if p != x1.Args[0] { + if x.Aux != s { break } - if idx != x1.Args[1] { + if p != x.Args[0] { break } - if mem != x1.Args[2] { + w0 := x.Args[1] + w1 := x.Args[2] + w2 := x.Args[3] + mem := x.Args[4] + if !(x.Uses == 1 && is20Bit(i-24) && clobber(x)) { break } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + v.reset(OpS390XSTMG4) + v.AuxInt = i - 24 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(w1) + v.AddArg(w2) + v.AddArg(w3) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value) bool { + // match: (MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVDstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + for { + sc := v.AuxInt + s := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + off := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if 
!(ValAndOff(sc).canAdd(off)) { + break + } + v.reset(OpS390XMOVDstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (OR or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))) y) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) + // result: (MOVDstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) for { - or := v.Args[0] - if or.Op != OpS390XOR { + sc := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { break } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { + v.reset(OpS390XMOVDstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVDstoreidx(v *Value) bool { + // match: (MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) + // cond: + // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpS390XMOVDstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) + // cond: + // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpS390XMOVDstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool { + // match: (MOVHBRstore [i] {s} p (SRDconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWBRstore [i-2] {s} p w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XSRDconst { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { + if v_1.AuxInt != 16 { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpS390XMOVHBRstore { break } - i1 := x1.AuxInt - if x1.Aux != s { + if x.AuxInt != i-2 { break } - if p != x1.Args[0] { + if x.Aux != s { break } - if idx != x1.Args[1] { + if p != x.Args[0] { break } - if mem != 
x1.Args[2] { + if w != x.Args[1] { break } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVWBRstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (OR or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)))) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVHBRstore [i] {s} p (SRDconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRDconst [j-16] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWBRstore [i-2] {s} p w0 mem) for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XSRDconst { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpS390XMOVHBRstore { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { + if x.AuxInt != i-2 { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + if x.Aux != s { break } - i1 := x1.AuxInt - if x1.Aux != s { + if p != x.Args[0] { break } - if p != x1.Args[0] { + w0 := x.Args[1] + if w0.Op != OpS390XSRDconst { break } - if idx != x1.Args[1] { + if w0.AuxInt != j-16 { break } - if mem != x1.Args[2] { + if w != w0.Args[0] { break } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - 
v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVWBRstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (OR or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem)))) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVHBRstore [i] {s} p (SRWconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWBRstore [i-2] {s} p w mem) for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XSRWconst { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { + if v_1.AuxInt != 16 { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpS390XMOVHBRstore { break } - i1 := x1.AuxInt - if x1.Aux != s { + if x.AuxInt != i-2 { break } - if p != x1.Args[0] { + if x.Aux != s { break } - if idx != x1.Args[1] { + if p != x.Args[0] { break } - if mem != x1.Args[2] { + if w != x.Args[1] { break } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVWBRstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (OR or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))) y) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVHBRstore [i] {s} p (SRWconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRWconst [j-16] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // 
result: (MOVWBRstore [i-2] {s} p w0 mem) for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XSRWconst { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpS390XMOVHBRstore { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { + if x.AuxInt != i-2 { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + if x.Aux != s { break } - i1 := x1.AuxInt - if x1.Aux != s { + if p != x.Args[0] { break } - if idx != x1.Args[0] { + w0 := x.Args[1] + if w0.Op != OpS390XSRWconst { break } - if p != x1.Args[1] { + if w0.AuxInt != j-16 { break } - if mem != x1.Args[2] { + if w != w0.Args[0] { break } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVWBRstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (OR or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))) y) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) + return false +} +func rewriteValueS390X_OpS390XMOVHBRstoreidx(v *Value) bool { + // match: (MOVHBRstoreidx [i] {s} p idx (SRDconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - s0 := or.Args[0] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRDconst { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { + if v_2.AuxInt != 16 { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { + w := v_2.Args[0] + x := v.Args[3] + 
if x.Op != OpS390XMOVHBRstoreidx { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + if x.AuxInt != i-2 { break } - i1 := x1.AuxInt - if x1.Aux != s { + if x.Aux != s { break } - if idx != x1.Args[0] { + if p != x.Args[0] { break } - if p != x1.Args[1] { + if idx != x.Args[1] { break } - if mem != x1.Args[2] { + if w != x.Args[2] { break } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVWBRstoreidx) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (OR or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)))) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVHBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRDconst [j-16] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRDconst { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVHBRstoreidx { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { + if x.AuxInt != i-2 { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { + if x.Aux != s { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + if p != x.Args[0] { break } - i1 := x1.AuxInt - if x1.Aux != s { + if idx != x.Args[1] { break } - if idx != x1.Args[0] { + w0 := x.Args[2] + if w0.Op != OpS390XSRDconst { break } - if p != x1.Args[1] { + if w0.AuxInt != j-16 { break } - if mem != x1.Args[2] { + if w != w0.Args[0] { break } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - b 
= mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVWBRstoreidx) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (OR or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem)))) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) + // match: (MOVHBRstoreidx [i] {s} p idx (SRWconst [16] w) x:(MOVHBRstoreidx [i-2] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWBRstoreidx [i-2] {s} p idx w mem) for { - or := v.Args[0] - if or.Op != OpS390XOR { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLDconst { - break - } - j0 := s0.AuxInt - r0 := s0.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRWconst { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpS390XSLDconst { + if v_2.AuxInt != 16 { break } - j1 := s1.AuxInt - r1 := s1.Args[0] - if r1.Op != OpS390XMOVHZreg { + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVHBRstoreidx { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + if x.AuxInt != i-2 { break } - i1 := x1.AuxInt - if x1.Aux != s { + if x.Aux != s { break } - if idx != x1.Args[0] { + if p != x.Args[0] { break } - if p != x1.Args[1] { + if idx != x.Args[1] { break } - if mem != x1.Args[2] { + if w != x.Args[2] { break } - if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XOR, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVWBRstoreidx) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) return true } - return false -} -func rewriteValueS390X_OpS390XORW(v *Value) bool { - b := v.Block - _ = b - types := &b.Func.Config.Types - _ = types - // match: (ORW x (MOVDconst [c])) - // cond: - // result: (ORWconst [c] x) + // match: 
(MOVHBRstoreidx [i] {s} p idx (SRWconst [j] w) x:(MOVHBRstoreidx [i-2] {s} p idx w0:(SRWconst [j-16] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWBRstoreidx [i-2] {s} p idx w0 mem) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRWconst { break } - c := v_1.AuxInt - v.reset(OpS390XORWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORW (MOVDconst [c]) x) - // cond: - // result: (ORWconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVHBRstoreidx { break } - c := v_0.AuxInt - x := v.Args[1] - v.reset(OpS390XORWconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORW (SLWconst x [c]) (SRWconst x [d])) - // cond: d == 32-c - // result: (RLLconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XSLWconst { + if x.AuxInt != i-2 { break } - c := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSRWconst { + if x.Aux != s { break } - d := v_1.AuxInt - if x != v_1.Args[0] { + if p != x.Args[0] { break } - if !(d == 32-c) { + if idx != x.Args[1] { break } - v.reset(OpS390XRLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (ORW (SRWconst x [d]) (SLWconst x [c])) - // cond: d == 32-c - // result: (RLLconst [c] x) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XSRWconst { + w0 := x.Args[2] + if w0.Op != OpS390XSRWconst { break } - d := v_0.AuxInt - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpS390XSLWconst { + if w0.AuxInt != j-16 { break } - c := v_1.AuxInt - if x != v_1.Args[0] { + if w != w0.Args[0] { break } - if !(d == 32-c) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - v.reset(OpS390XRLLconst) - v.AuxInt = c - v.AddArg(x) + v.reset(OpS390XMOVWBRstoreidx) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (ORW x x) - // cond: - // result: x + return false +} +func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool { + // match: (MOVHZload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVHZreg x) for { - x := v.Args[0] - if x != v.Args[1] { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVHstore { break } - v.reset(OpCopy) - v.Type = x.Type + off2 := v_1.AuxInt + sym2 := v_1.Aux + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpS390XMOVHZreg) v.AddArg(x) return true } - // match: (ORW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ORWload [off] {sym} x ptr mem) + // match: (MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is20Bit(off1+off2) + // result: (MOVHZload [off1+off2] {sym} ptr mem) for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWload { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is20Bit(off1 + off2)) { break } - v.reset(OpS390XORWload) - v.Type = t - v.AuxInt = off + 
v.reset(OpS390XMOVHZload) + v.AuxInt = off1 + off2 v.Aux = sym - v.AddArg(x) v.AddArg(ptr) v.AddArg(mem) return true } - // match: (ORW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ORWload [off] {sym} x ptr mem) + // match: (MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVWload { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { break } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(OpS390XORWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) + v.reset(OpS390XMOVHZload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) v.AddArg(mem) return true } - // match: (ORW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ORWload [off] {sym} x ptr mem) + // match: (MOVHZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVHZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVWload { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddridx { break } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - v.reset(OpS390XORWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) + v.reset(OpS390XMOVHZloadidx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (ORW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ORWload [off] {sym} x ptr mem) + // match: (MOVHZload [off] {sym} (ADD ptr idx) mem) + // cond: ptr.Op != OpSB + // result: (MOVHZloadidx [off] {sym} ptr idx mem) for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWload { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADD { break } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(ptr.Op != OpSB) { break } - v.reset(OpS390XORWload) - v.Type = t + v.reset(OpS390XMOVHZloadidx) v.AuxInt = off v.Aux = sym - v.AddArg(x) v.AddArg(ptr) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (ORW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ORWload [off] {sym} x ptr mem) + return false +} +func rewriteValueS390X_OpS390XMOVHZloadidx(v *Value) bool { + // match: (MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) + // cond: + // 
result: (MOVHZloadidx [c+d] {sym} ptr idx mem) for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - v.reset(OpS390XORWload) - v.Type = t - v.AuxInt = off + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpS390XMOVHZloadidx) + v.AuxInt = c + d v.Aux = sym - v.AddArg(x) v.AddArg(ptr) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (ORW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ORWload [off] {sym} x ptr mem) + // match: (MOVHZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) + // cond: + // result: (MOVHZloadidx [c+d] {sym} ptr idx mem) for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { break } - v.reset(OpS390XORWload) - v.Type = t - v.AuxInt = off + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpS390XMOVHZloadidx) + v.AuxInt = c + d v.Aux = sym - v.AddArg(x) v.AddArg(ptr) + v.AddArg(idx) v.AddArg(mem) return true } - // match: (ORW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ORWload [off] {sym} x ptr mem) + return false +} +func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool { + b := v.Block + _ = b + // match: (MOVHZreg x:(MOVBZload _ _)) + // cond: + // result: (MOVDreg x) for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XORWload) - v.Type = t - v.AuxInt = off - v.Aux = sym + x := v.Args[0] + if x.Op != OpS390XMOVBZload { + break + } + v.reset(OpS390XMOVDreg) v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) return true } - // match: (ORW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (ORWload [off] {sym} x ptr mem) + // match: (MOVHZreg x:(MOVHZload _ _)) + // cond: + // result: (MOVDreg x) for { - t := v.Type x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { + if x.Op != OpS390XMOVHZload { break } - v.reset(OpS390XORWload) - v.Type = t - v.AuxInt = off - v.Aux = sym + v.reset(OpS390XMOVDreg) v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) return true } - // match: (ORW x1:(MOVBZload [i1] {s} p mem) sh:(SLWconst [8] x0:(MOVBZload [i0] {s} p mem))) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem) + // match: (MOVHZreg x:(Arg )) + // cond: (is8BitInt(t) || is16BitInt(t)) && !isSigned(t) + // result: (MOVDreg x) for { - x1 := v.Args[0] 
- if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { - break - } - if p != x0.Args[0] { - break - } - if mem != x0.Args[1] { + x := v.Args[0] + if x.Op != OpArg { break } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + t := x.Type + if !((is8BitInt(t) || is16BitInt(t)) && !isSigned(t)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v.reset(OpS390XMOVDreg) + v.AddArg(x) return true } - // match: (ORW sh:(SLWconst [8] x0:(MOVBZload [i0] {s} p mem)) x1:(MOVBZload [i1] {s} p mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem) + // match: (MOVHZreg x:(MOVBZreg _)) + // cond: + // result: (MOVDreg x) for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { - break - } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + x := v.Args[0] + if x.Op != OpS390XMOVBZreg { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v.reset(OpS390XMOVDreg) + v.AddArg(x) return true } - // match: (ORW x1:(MOVHZload [i1] {s} p mem) sh:(SLWconst [16] x0:(MOVHZload [i0] {s} p mem))) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem) + // match: (MOVHZreg x:(MOVHZreg _)) + // cond: + // result: (MOVDreg x) for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { - break - } - if sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { + x := v.Args[0] + if x.Op != OpS390XMOVHZreg { break } - if p != x0.Args[0] { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHZreg (MOVDconst [c])) + // cond: + // result: (MOVDconst [int64(uint16(c))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { break } - if mem != x0.Args[1] { + c := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = int64(uint16(c)) + return true + } + // match: (MOVHZreg x:(MOVHZload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHZload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVHZload { break } - if !(i1 == i0+2 
&& p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32) + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, v.Type) v.reset(OpCopy) v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) v0.AddArg(mem) return true } - // match: (ORW sh:(SLWconst [16] x0:(MOVHZload [i0] {s} p mem)) x1:(MOVHZload [i1] {s} p mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem) + // match: (MOVHZreg x:(MOVHZloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHZloadidx [off] {sym} ptr idx mem) for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { - break - } - if sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZload { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if mem != x1.Args[1] { + x := v.Args[0] + if x.Op != OpS390XMOVHZloadidx { break } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32) + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, v.Type) v.reset(OpCopy) v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) v0.AddArg(mem) return true } - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem)) or:(ORW s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZload [i0] {s} p mem)) y) + return false +} +func rewriteValueS390X_OpS390XMOVHload(v *Value) bool { + // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is20Bit(off1+off2) + // result: (MOVHload [off1+off2] {sym} ptr mem) for { - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is20Bit(off1 + off2)) { break } - i1 := x1.AuxInt - if x1.Aux != s { + v.reset(OpS390XMOVHload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // 
match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { break } - if p != x1.Args[0] { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if mem != x1.Args[1] { + v.reset(OpS390XMOVHload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool { + b := v.Block + _ = b + // match: (MOVHreg x:(MOVBload _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVBload { break } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBZload _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVBZload { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVDreg) + v.AddArg(x) return true } - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem)) or:(ORW y s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZload [i0] {s} p mem)) y) + // match: (MOVHreg x:(MOVHload _ _)) + // cond: + // result: (MOVDreg x) for { - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { + x := v.Args[0] + if x.Op != OpS390XMOVHload { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(Arg )) + // cond: (is8BitInt(t) || is16BitInt(t)) && isSigned(t) + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpArg { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { + t := x.Type + if !((is8BitInt(t) || is16BitInt(t)) && isSigned(t)) { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVBreg { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBZreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVBZreg { break } - i1 := x1.AuxInt - if x1.Aux != s { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + 
if x.Op != OpS390XMOVHreg { break } - if p != x1.Args[0] { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVHreg (MOVDconst [c])) + // cond: + // result: (MOVDconst [int64(int16(c))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { break } - if mem != x1.Args[1] { + c := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = int64(int16(c)) + return true + } + // match: (MOVHreg x:(MOVHZload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVHZload { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVHload, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) return true } - // match: (ORW or:(ORW s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem)) y) s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZload [i0] {s} p mem)) y) + return false +} +func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool { + // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) + // cond: + // result: (MOVHstore [off] {sym} ptr x mem) for { - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { - break - } - i0 := x0.AuxInt - if x0.Aux != s { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVHreg { break } - if p != x0.Args[0] { + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpS390XMOVHstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem) + // cond: + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVHZreg { break } - if mem != x0.Args[1] { + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpS390XMOVHstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is20Bit(off1+off2) + // result: (MOVHstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - if !(i1 == i0+1 && 
j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is20Bit(off1 + off2)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVHstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (ORW or:(ORW y s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem))) s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZload [i0] {s} p mem)) y) + // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) + // cond: validOff(off) && ptr.Op != OpSB + // result: (MOVHstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) for { - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { + c := v_1.AuxInt + mem := v.Args[2] + if !(validOff(off) && ptr.Op != OpSB) { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { + v.reset(OpS390XMOVHstoreconst) + v.AuxInt = makeValAndOff(int64(int16(c)), off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(OpS390XMOVHstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVHstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVHstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddridx { break } - if p != x0.Args[0] { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if mem != x0.Args[1] { + v.reset(OpS390XMOVHstoreidx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return 
true + } + // match: (MOVHstore [off] {sym} (ADD ptr idx) val mem) + // cond: ptr.Op != OpSB + // result: (MOVHstoreidx [off] {sym} ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADD { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(ptr.Op != OpSB) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVHstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (ORW x1:(MOVBZloadidx [i1] {s} p idx mem) sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) + // match: (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRDconst [16] w) mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: (MOVWstore [i-2] {s} p w mem) for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w := v.Args[1] + x := v.Args[2] + if x.Op != OpS390XMOVHstore { break } - if sh.AuxInt != 8 { + if x.AuxInt != i-2 { break } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if x.Aux != s { break } - i0 := x0.AuxInt - if x0.Aux != s { + if p != x.Args[0] { break } - if p != x0.Args[0] { + x_1 := x.Args[1] + if x_1.Op != OpS390XSRDconst { break } - if idx != x0.Args[1] { + if x_1.AuxInt != 16 { break } - if mem != x0.Args[2] { + if w != x_1.Args[0] { break } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + mem := x.Args[2] + if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(OpS390XMOVWstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (ORW x1:(MOVBZloadidx [i1] {s} idx p mem) sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) + // match: (MOVHstore [i] {s} p w0:(SRDconst [j] w) x:(MOVHstore [i-2] {s} p (SRDconst [j+16] w) mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: (MOVWstore [i-2] {s} p w0 mem) for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w0 := v.Args[1] + 
if w0.Op != OpS390XSRDconst { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { + j := w0.AuxInt + w := w0.Args[0] + x := v.Args[2] + if x.Op != OpS390XMOVHstore { break } - if sh.AuxInt != 8 { + if x.AuxInt != i-2 { break } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if x.Aux != s { break } - i0 := x0.AuxInt - if x0.Aux != s { + if p != x.Args[0] { break } - if p != x0.Args[0] { + x_1 := x.Args[1] + if x_1.Op != OpS390XSRDconst { break } - if idx != x0.Args[1] { + if x_1.AuxInt != j+16 { break } - if mem != x0.Args[2] { + if w != x_1.Args[0] { break } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + mem := x.Args[2] + if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(OpS390XMOVWstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (ORW x1:(MOVBZloadidx [i1] {s} p idx mem) sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) + // match: (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRWconst [16] w) mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: (MOVWstore [i-2] {s} p w mem) for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w := v.Args[1] + x := v.Args[2] + if x.Op != OpS390XMOVHstore { break } - if sh.AuxInt != 8 { + if x.AuxInt != i-2 { break } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if x.Aux != s { break } - i0 := x0.AuxInt - if x0.Aux != s { + if p != x.Args[0] { break } - if idx != x0.Args[0] { + x_1 := x.Args[1] + if x_1.Op != OpS390XSRWconst { break } - if p != x0.Args[1] { + if x_1.AuxInt != 16 { break } - if mem != x0.Args[2] { + if w != x_1.Args[0] { break } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + mem := x.Args[2] + if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(OpS390XMOVWstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (ORW x1:(MOVBZloadidx [i1] {s} idx p mem) sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) + // match: (MOVHstore [i] {s} p w0:(SRWconst [j] w) x:(MOVHstore [i-2] {s} p (SRWconst [j+16] w) mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: 
(MOVWstore [i-2] {s} p w0 mem) for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w0 := v.Args[1] + if w0.Op != OpS390XSRWconst { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { + j := w0.AuxInt + w := w0.Args[0] + x := v.Args[2] + if x.Op != OpS390XMOVHstore { break } - if sh.AuxInt != 8 { + if x.AuxInt != i-2 { break } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if x.Aux != s { break } - i0 := x0.AuxInt - if x0.Aux != s { + if p != x.Args[0] { break } - if idx != x0.Args[0] { + x_1 := x.Args[1] + if x_1.Op != OpS390XSRWconst { break } - if p != x0.Args[1] { + if x_1.AuxInt != j+16 { break } - if mem != x0.Args[2] { + if w != x_1.Args[0] { break } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + mem := x.Args[2] + if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(OpS390XMOVWstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (ORW sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem)) x1:(MOVBZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) + return false +} +func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool { + // match: (MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVHstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { - break - } - if sh.AuxInt != 8 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { + sc := v.AuxInt + s := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + off := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(sc).canAdd(off)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(OpS390XMOVHstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (ORW sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} idx p mem)) x1:(MOVBZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) + // match: (MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) + // cond: 
canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) + // result: (MOVHstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { - break - } - if sh.AuxInt != 8 { + sc := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { break } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZloadidx { + v.reset(OpS390XMOVHstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem)) + // cond: p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x) + // result: (MOVWstoreconst [makeValAndOff(ValAndOff(c).Val()&0xffff | ValAndOff(a).Val()<<16, ValAndOff(a).Off())] {s} p mem) + for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + x := v.Args[1] + if x.Op != OpS390XMOVHstoreconst { break } - i1 := x1.AuxInt - if x1.Aux != s { + a := x.AuxInt + if x.Aux != s { break } - if p != x1.Args[0] { + if p != x.Args[0] { break } - if idx != x1.Args[1] { + mem := x.Args[1] + if !(p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) { break } - if mem != x1.Args[2] { + v.reset(OpS390XMOVWstoreconst) + v.AuxInt = makeValAndOff(ValAndOff(c).Val()&0xffff|ValAndOff(a).Val()<<16, ValAndOff(a).Off()) + v.Aux = s + v.AddArg(p) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHstoreidx(v *Value) bool { + // match: (MOVHstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) + // cond: + // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpS390XMOVHstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVHstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) + // cond: + // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpS390XMOVHstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (ORW sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} p idx mem)) x1:(MOVBZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) + // match: (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [16] 
w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx [i-2] {s} p idx w mem) for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + w := v.Args[2] + x := v.Args[3] + if x.Op != OpS390XMOVHstoreidx { break } - if sh.AuxInt != 8 { + if x.AuxInt != i-2 { break } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if x.Aux != s { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZloadidx { + if p != x.Args[0] { break } - i1 := x1.AuxInt - if x1.Aux != s { + if idx != x.Args[1] { break } - if idx != x1.Args[0] { + x_2 := x.Args[2] + if x_2.Op != OpS390XSRDconst { break } - if p != x1.Args[1] { + if x_2.AuxInt != 16 { break } - if mem != x1.Args[2] { + if w != x_2.Args[0] { break } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (ORW sh:(SLWconst [8] x0:(MOVBZloadidx [i0] {s} idx p mem)) x1:(MOVBZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) + // match: (MOVHstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRDconst [j+16] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + w0 := v.Args[2] + if w0.Op != OpS390XSRDconst { + break + } + j := w0.AuxInt + w := w0.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVHstoreidx { break } - if sh.AuxInt != 8 { + if x.AuxInt != i-2 { break } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if x.Aux != s { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVBZloadidx { + if p != x.Args[0] { break } - i1 := x1.AuxInt - if x1.Aux != s { + if idx != x.Args[1] { break } - if idx != x1.Args[0] { + x_2 := x.Args[2] + if x_2.Op != OpS390XSRDconst { break } - if p != x1.Args[1] { + if x_2.AuxInt != j+16 { break } - if mem != x1.Args[2] { + if w != x_2.Args[0] { break } - if !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (ORW x1:(MOVHZloadidx [i1] {s} p idx mem) sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses 
== 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) + // match: (MOVHstoreidx [i] {s} p idx w x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [16] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx [i-2] {s} p idx w mem) for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZloadidx { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + w := v.Args[2] + x := v.Args[3] + if x.Op != OpS390XMOVHstoreidx { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { + if x.AuxInt != i-2 { break } - if sh.AuxInt != 16 { + if x.Aux != s { break } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { + if p != x.Args[0] { break } - i0 := x0.AuxInt - if x0.Aux != s { + if idx != x.Args[1] { break } - if p != x0.Args[0] { + x_2 := x.Args[2] + if x_2.Op != OpS390XSRWconst { break } - if idx != x0.Args[1] { + if x_2.AuxInt != 16 { break } - if mem != x0.Args[2] { + if w != x_2.Args[0] { break } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (ORW x1:(MOVHZloadidx [i1] {s} idx p mem) sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) + // match: (MOVHstoreidx [i] {s} p idx w0:(SRWconst [j] w) x:(MOVHstoreidx [i-2] {s} p idx (SRWconst [j+16] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVWstoreidx [i-2] {s} p idx w0 mem) for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZloadidx { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + w0 := v.Args[2] + if w0.Op != OpS390XSRWconst { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { + j := w0.AuxInt + w := w0.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVHstoreidx { break } - if sh.AuxInt != 16 { + if x.AuxInt != i-2 { break } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { + if x.Aux != s { break } - i0 := x0.AuxInt - if x0.Aux != s { + if p != x.Args[0] { break } - if p != x0.Args[0] { + if idx != x.Args[1] { break } - if idx != x0.Args[1] { + x_2 := x.Args[2] + if x_2.Op != OpS390XSRWconst { break } - if mem != x0.Args[2] { + if x_2.AuxInt != j+16 { break } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + if w != x_2.Args[0] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { + break + } + 
v.reset(OpS390XMOVWstoreidx) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (ORW x1:(MOVHZloadidx [i1] {s} p idx mem) sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) + return false +} +func rewriteValueS390X_OpS390XMOVWBRstore(v *Value) bool { + // match: (MOVWBRstore [i] {s} p (SRDconst [32] w) x:(MOVWBRstore [i-4] {s} p w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVDBRstore [i-4] {s} p w mem) for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XSRDconst { break } - if sh.AuxInt != 16 { + if v_1.AuxInt != 32 { break } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpS390XMOVWBRstore { break } - i0 := x0.AuxInt - if x0.Aux != s { + if x.AuxInt != i-4 { break } - if idx != x0.Args[0] { + if x.Aux != s { break } - if p != x0.Args[1] { + if p != x.Args[0] { break } - if mem != x0.Args[2] { + if w != x.Args[1] { break } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(OpS390XMOVDBRstore) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (ORW x1:(MOVHZloadidx [i1] {s} idx p mem) sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) + // match: (MOVWBRstore [i] {s} p (SRDconst [j] w) x:(MOVWBRstore [i-4] {s} p w0:(SRDconst [j-32] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVDBRstore [i-4] {s} p w0 mem) for { - x1 := v.Args[0] - if x1.Op != OpS390XMOVHZloadidx { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XSRDconst { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { + j := v_1.AuxInt + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpS390XMOVWBRstore { break } - if sh.AuxInt != 16 { + if x.AuxInt != i-4 { break } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { + if x.Aux != s { break } - i0 := x0.AuxInt - if x0.Aux != s { + if p != x.Args[0] { break } - if idx != x0.Args[0] { + w0 := x.Args[1] + if w0.Op != OpS390XSRDconst { break } - if p != x0.Args[1] { + if w0.AuxInt != j-32 { break } - if mem != x0.Args[2] { + if w != w0.Args[0] { break } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break 
} - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(OpS390XMOVDBRstore) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (ORW sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem)) x1:(MOVHZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) + return false +} +func rewriteValueS390X_OpS390XMOVWBRstoreidx(v *Value) bool { + // match: (MOVWBRstoreidx [i] {s} p idx (SRDconst [32] w) x:(MOVWBRstoreidx [i-4] {s} p idx w mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVDBRstoreidx [i-4] {s} p idx w mem) for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRDconst { break } - if sh.AuxInt != 16 { + if v_2.AuxInt != 32 { break } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVWBRstoreidx { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZloadidx { + if x.AuxInt != i-4 { break } - i1 := x1.AuxInt - if x1.Aux != s { + if x.Aux != s { break } - if p != x1.Args[0] { + if p != x.Args[0] { break } - if idx != x1.Args[1] { + if idx != x.Args[1] { break } - if mem != x1.Args[2] { + if w != x.Args[2] { break } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(OpS390XMOVDBRstoreidx) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (ORW sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} idx p mem)) x1:(MOVHZloadidx [i1] {s} p idx mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) + // match: (MOVWBRstoreidx [i] {s} p idx (SRDconst [j] w) x:(MOVWBRstoreidx [i-4] {s} p idx w0:(SRDconst [j-32] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVDBRstoreidx [i-4] {s} p idx w0 mem) for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XSRDconst { break } - if sh.AuxInt != 16 { + j := v_2.AuxInt + w := v_2.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVWBRstoreidx { break } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { + if x.AuxInt != i-4 { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZloadidx { + if x.Aux != s { break } - i1 := x1.AuxInt - if x1.Aux != s { + if p != x.Args[0] { break } - if p != x1.Args[0] { + if idx != x.Args[1] { break } - if idx != x1.Args[1] { + w0 := x.Args[2] + 
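Illustrative aside, not part of the patch: the MOVWBRstore rules above merge two byte-reversed word stores (w>>32 at offset i, w at offset i-4) into one byte-reversed doubleword store at i-4. A byte-reversed store on big-endian S390X writes little-endian byte order, which the standalone sketch below models with encoding/binary; offsets and the value of w are made up for illustration.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	const w uint64 = 0x1122334455667788
	const i = 8

	a := make([]byte, 16)
	binary.LittleEndian.PutUint32(a[i-4:], uint32(w))   // MOVWBRstore [i-4] p w
	binary.LittleEndian.PutUint32(a[i:], uint32(w>>32)) // MOVWBRstore [i] p (SRDconst [32] w)

	b := make([]byte, 16)
	binary.LittleEndian.PutUint64(b[i-4:], w) // MOVDBRstore [i-4] p w

	fmt.Println(bytes.Equal(a, b)) // true: the merged store writes the same bytes
}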
if w0.Op != OpS390XSRDconst { break } - if mem != x1.Args[2] { + if w0.AuxInt != j-32 { + break + } + if w != w0.Args[0] { break } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(OpS390XMOVDBRstoreidx) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (ORW sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem)) x1:(MOVHZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) + return false +} +func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool { + // match: (MOVWZload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: (MOVWZreg x) for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { - break - } - if sh.AuxInt != 16 { - break - } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if idx != x1.Args[0] { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVWstore { break } - if p != x1.Args[1] { + off2 := v_1.AuxInt + sym2 := v_1.Aux + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - if mem != x1.Args[2] { + v.reset(OpS390XMOVWZreg) + v.AddArg(x) + return true + } + // match: (MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is20Bit(off1+off2) + // result: (MOVWZload [off1+off2] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is20Bit(off1 + off2)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(OpS390XMOVWZload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (ORW sh:(SLWconst [16] x0:(MOVHZloadidx [i0] {s} idx p mem)) x1:(MOVHZloadidx [i1] {s} idx p mem)) - // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) + // match: (MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { break 
} - if sh.AuxInt != 16 { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - x0 := sh.Args[0] - if x0.Op != OpS390XMOVHZloadidx { + v.reset(OpS390XMOVWZload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVWZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddridx { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - x1 := v.Args[1] - if x1.Op != OpS390XMOVHZloadidx { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - i1 := x1.AuxInt - if x1.Aux != s { + v.reset(OpS390XMOVWZloadidx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWZload [off] {sym} (ADD ptr idx) mem) + // cond: ptr.Op != OpSB + // result: (MOVWZloadidx [off] {sym} ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADD { break } - if idx != x1.Args[0] { + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(ptr.Op != OpSB) { break } - if p != x1.Args[1] { + v.reset(OpS390XMOVWZloadidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWZloadidx(v *Value) bool { + // match: (MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) + // cond: + // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - if mem != x1.Args[2] { + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpS390XMOVWZloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) + // cond: + // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { break } - if !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpS390XMOVWZloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool { + b := v.Block + _ = b + // match: (MOVWZreg x:(MOVBZload _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVBZload { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) + v.reset(OpS390XMOVDreg) + v.AddArg(x) return true } - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 
&& or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) + // match: (MOVWZreg x:(MOVHZload _ _)) + // cond: + // result: (MOVDreg x) for { - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { + x := v.Args[0] + if x.Op != OpS390XMOVHZload { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWZreg x:(MOVWZload _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVWZload { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XORW { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWZreg x:(Arg )) + // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpArg { break } - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { + t := x.Type + if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t)) { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWZreg x:(MOVBZreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVBZreg { break } - i1 := x1.AuxInt - if x1.Aux != s { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWZreg x:(MOVHZreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVHZreg { break } - if p != x1.Args[0] { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWZreg x:(MOVWZreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVWZreg { break } - if idx != x1.Args[1] { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWZreg (MOVDconst [c])) + // cond: + // result: (MOVDconst [int64(uint32(c))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { break } - if mem != x1.Args[2] { + c := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = int64(uint32(c)) + return true + } + // match: (MOVWZreg x:(MOVWZload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWZload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVWZload { break } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) return true } - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == 
j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) + // match: (MOVWZreg x:(MOVWZloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWZloadidx [off] {sym} ptr idx mem) for { - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { - break - } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { + x := v.Args[0] + if x.Op != OpS390XMOVWZloadidx { break } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) return true } - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) + return false +} +func rewriteValueS390X_OpS390XMOVWload(v *Value) bool { + // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: is20Bit(off1+off2) + // result: (MOVWload [off1+off2] {sym} ptr mem) for { - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XORW { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(is20Bit(off1 + off2)) { break } - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { + v.reset(OpS390XMOVWload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) + 
for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - i1 := x1.AuxInt - if x1.Aux != s { + v.reset(OpS390XMOVWload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool { + b := v.Block + _ = b + // match: (MOVWreg x:(MOVBload _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVBload { break } - if idx != x1.Args[0] { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBZload _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVBZload { break } - if p != x1.Args[1] { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHload _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVHload { break } - if mem != x1.Args[2] { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHZload _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVHZload { break } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWload _ _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVWload { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVDreg) + v.AddArg(x) return true } - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) + // match: (MOVWreg x:(Arg )) + // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t) + // result: (MOVDreg x) for { - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { + x := v.Args[0] + if x.Op != OpArg { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + t := x.Type + if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t)) { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XORW { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVBreg { break } - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { + v.reset(OpS390XMOVDreg) + v.AddArg(x) 
+ return true + } + // match: (MOVWreg x:(MOVBZreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVBZreg { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVHreg { break } - i1 := x1.AuxInt - if x1.Aux != s { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVHreg { break } - if idx != x1.Args[0] { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWreg _)) + // cond: + // result: (MOVDreg x) + for { + x := v.Args[0] + if x.Op != OpS390XMOVWreg { break } - if p != x1.Args[1] { + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + // match: (MOVWreg (MOVDconst [c])) + // cond: + // result: (MOVDconst [int64(int32(c))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { break } - if mem != x1.Args[2] { + c := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = int64(int32(c)) + return true + } + // match: (MOVWreg x:(MOVWZload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVWZload { break } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + off := x.AuxInt + sym := x.Aux + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVWload, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) return true } - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(ORW y s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) + return false +} +func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool { + // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) + // cond: + // result: (MOVWstore [off] {sym} ptr x mem) for { - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVWreg { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpS390XMOVWstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWZreg x) mem) + // cond: + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + 
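Illustrative aside, not part of the patch: the constant-folding rules above rewrite an extension of MOVDconst [c] into a new constant, using int64(uint32(c)) for the zero-extending MOVWZreg and int64(int32(c)) for the sign-extending MOVWreg. A minimal standalone check of those two conversions, with an arbitrary constant whose low word has bit 31 set:

package main

import "fmt"

func main() {
	c := int64(-1)<<32 | 0x80000001 // arbitrary 64-bit constant

	fmt.Printf("%#x\n", int64(uint32(c))) // MOVWZreg (MOVDconst [c]) result: 0x80000001
	fmt.Println(int64(int32(c)))          // MOVWreg  (MOVDconst [c]) result: -2147483647
}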
v_1 := v.Args[1] + if v_1.Op != OpS390XMOVWZreg { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XORW { + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpS390XMOVWstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // cond: is20Bit(off1+off2) + // result: (MOVWstore [off1+off2] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is20Bit(off1 + off2)) { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + v.reset(OpS390XMOVWstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) + // cond: validOff(off) && int64(int16(c)) == c && ptr.Op != OpSB + // result: (MOVWstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { break } - i1 := x1.AuxInt - if x1.Aux != s { + c := v_1.AuxInt + mem := v.Args[2] + if !(validOff(off) && int64(int16(c)) == c && ptr.Op != OpSB) { break } - if p != x1.Args[0] { + v.reset(OpS390XMOVWstoreconst) + v.AuxInt = makeValAndOff(int64(int32(c)), off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { break } - if idx != x1.Args[1] { + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - if mem != x1.Args[2] { + v.reset(OpS390XMOVWstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddridx { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + 
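Illustrative aside, not part of the patch: the MOVWstore-of-constant rule above only fires when int64(int16(c)) == c, i.e. when the constant round-trips through a signed 16-bit immediate (the store-immediate forms on S390X take a 16-bit immediate). A quick standalone illustration of that predicate with a few sample values; the helper name below is made up for the example.

package main

import "fmt"

// fitsInInt16 mirrors the condition int64(int16(c)) == c used in the rule above.
func fitsInInt16(c int64) bool { return int64(int16(c)) == c }

func main() {
	for _, c := range []int64{0, 42, -1, 32767, 32768, -32768, -32769, 1 << 20} {
		fmt.Printf("%8d -> %v\n", c, fitsInInt16(c))
	}
}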
v.AddArg(val) + v.AddArg(mem) return true } - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(ORW y s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) + // match: (MOVWstore [off] {sym} (ADD ptr idx) val mem) + // cond: ptr.Op != OpSB + // result: (MOVWstoreidx [off] {sym} ptr idx val mem) for { - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { + off := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADD { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(ptr.Op != OpSB) { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XORW { + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [i] {s} p (SRDconst [32] w) x:(MOVWstore [i-4] {s} p w mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: (MOVDstore [i-4] {s} p w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XSRDconst { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { + if v_1.AuxInt != 32 { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + w := v_1.Args[0] + x := v.Args[2] + if x.Op != OpS390XMOVWstore { break } - i1 := x1.AuxInt - if x1.Aux != s { + if x.AuxInt != i-4 { break } - if p != x1.Args[0] { + if x.Aux != s { break } - if idx != x1.Args[1] { + if p != x.Args[0] { break } - if mem != x1.Args[2] { + if w != x.Args[1] { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + mem := x.Args[2] + if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVDstore) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) or:(ORW y s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) + // match: (MOVWstore [i] {s} p w0:(SRDconst [j] w) x:(MOVWstore [i-4] {s} p (SRDconst [j+32] w) mem)) + // cond: p.Op != OpSB && x.Uses == 1 && clobber(x) + // result: (MOVDstore [i-4] {s} p w0 mem) for { - s0 := v.Args[0] - if s0.Op != 
OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w0 := v.Args[1] + if w0.Op != OpS390XSRDconst { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XORW { + j := w0.AuxInt + w := w0.Args[0] + x := v.Args[2] + if x.Op != OpS390XMOVWstore { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { + if x.AuxInt != i-4 { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + if x.Aux != s { break } - i1 := x1.AuxInt - if x1.Aux != s { + if p != x.Args[0] { break } - if idx != x1.Args[0] { + x_1 := x.Args[1] + if x_1.Op != OpS390XSRDconst { break } - if p != x1.Args[1] { + if x_1.AuxInt != j+32 { break } - if mem != x1.Args[2] { + if w != x_1.Args[0] { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + mem := x.Args[2] + if !(p.Op != OpSB && x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVDstore) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(ORW y s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) + // match: (MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem)) + // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(i-4) && clobber(x) + // result: (STM2 [i-4] {s} p w0 w1 mem) for { - s0 := v.Args[0] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { - break - } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w1 := v.Args[1] + x := v.Args[2] + if x.Op != OpS390XMOVWstore { break } - if idx != x1.Args[0] { + if x.AuxInt != i-4 { break } - if p != x1.Args[1] { + if x.Aux != s { break } - if mem != x1.Args[2] { + if p != x.Args[0] { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + w0 := x.Args[1] + mem := x.Args[2] + if !(p.Op != OpSB && x.Uses == 1 && is20Bit(i-4) && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, 
v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XSTM2) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(w1) + v.AddArg(mem) return true } - // match: (ORW or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y) s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) + // match: (MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem)) + // cond: x.Uses == 1 && is20Bit(i-8) && clobber(x) + // result: (STM3 [i-8] {s} p w0 w1 w2 mem) for { - or := v.Args[0] - if or.Op != OpS390XORW { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w2 := v.Args[1] + x := v.Args[2] + if x.Op != OpS390XSTM2 { break } - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { + if x.AuxInt != i-8 { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + if x.Aux != s { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { + if p != x.Args[0] { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + w0 := x.Args[1] + w1 := x.Args[2] + mem := x.Args[3] + if !(x.Uses == 1 && is20Bit(i-8) && clobber(x)) { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(OpS390XSTM3) + v.AuxInt = i - 8 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(w1) + v.AddArg(w2) + v.AddArg(mem) + return true + } + // match: (MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem)) + // cond: x.Uses == 1 && is20Bit(i-12) && clobber(x) + // result: (STM4 [i-12] {s} p w0 w1 w2 w3 mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + w3 := v.Args[1] + x := v.Args[2] + if x.Op != OpS390XSTM3 { break } - if p != x0.Args[0] { + if x.AuxInt != i-12 { break } - if idx != x0.Args[1] { + if x.Aux != s { break } - if mem != x0.Args[2] { + if p != x.Args[0] { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + w0 := x.Args[1] + w1 := x.Args[2] + w2 := x.Args[3] + mem := x.Args[4] + if !(x.Uses == 1 && is20Bit(i-12) && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XSTM4) + v.AuxInt = i - 12 + v.Aux = s + v.AddArg(p) + v.AddArg(w0) + v.AddArg(w1) + v.AddArg(w2) + v.AddArg(w3) + v.AddArg(mem) return true } - // match: (ORW or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) y) s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && 
s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) + return false +} +func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool { + b := v.Block + _ = b + types := &b.Func.Config.Types + _ = types + // match: (MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) for { - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { + sc := v.AuxInt + s := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + off := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + if !(ValAndOff(sc).canAdd(off)) { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { + v.reset(OpS390XMOVWstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) + // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddr { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + off := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(OpS390XMOVWstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem)) + // cond: p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off() + 4 == ValAndOff(c).Off() && clobber(x) + // result: (MOVDstore [ValAndOff(a).Off()] {s} p (MOVDconst [ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32]) mem) + for { + c := v.AuxInt + s := v.Aux + p := v.Args[0] + x := v.Args[1] + if x.Op != OpS390XMOVWstoreconst { break } - if p != x0.Args[0] { + a := x.AuxInt + if x.Aux != s { break } - if idx != x0.Args[1] { + if p != x.Args[0] { break } - if mem != x0.Args[2] { + mem := x.Args[1] + if !(p.Op != OpSB && x.Uses == 1 && ValAndOff(a).Off()+4 == ValAndOff(c).Off() && clobber(x)) { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + v.reset(OpS390XMOVDstore) + v.AuxInt = ValAndOff(a).Off() + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64) + v0.AuxInt = ValAndOff(c).Val()&0xffffffff | ValAndOff(a).Val()<<32 + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWstoreidx(v *Value) bool { + // match: (MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) + // cond: + // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - 
v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (ORW or:(ORW y s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) + // match: (MOVWstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) + // cond: + // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) for { - or := v.Args[0] - if or.Op != OpS390XORW { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpS390XMOVWstoreidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx [i] {s} p idx w x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [32] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVDstoreidx [i-4] {s} p idx w mem) + for { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + w := v.Args[2] + x := v.Args[3] + if x.Op != OpS390XMOVWstoreidx { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + if x.AuxInt != i-4 { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { + if x.Aux != s { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if p != x.Args[0] { break } - i0 := x0.AuxInt - if x0.Aux != s { + if idx != x.Args[1] { break } - if p != x0.Args[0] { + x_2 := x.Args[2] + if x_2.Op != OpS390XSRDconst { break } - if idx != x0.Args[1] { + if x_2.AuxInt != 32 { break } - if mem != x0.Args[2] { + if w != x_2.Args[0] { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVDstoreidx) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w) + v.AddArg(mem) return true } - // match: (ORW or:(ORW y s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && 
x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) + // match: (MOVWstoreidx [i] {s} p idx w0:(SRDconst [j] w) x:(MOVWstoreidx [i-4] {s} p idx (SRDconst [j+32] w) mem)) + // cond: x.Uses == 1 && clobber(x) + // result: (MOVDstoreidx [i-4] {s} p idx w0 mem) for { - or := v.Args[0] - if or.Op != OpS390XORW { + i := v.AuxInt + s := v.Aux + p := v.Args[0] + idx := v.Args[1] + w0 := v.Args[2] + if w0.Op != OpS390XSRDconst { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { + j := w0.AuxInt + w := w0.Args[0] + x := v.Args[3] + if x.Op != OpS390XMOVWstoreidx { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + if x.AuxInt != i-4 { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { + if x.Aux != s { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if p != x.Args[0] { break } - i0 := x0.AuxInt - if x0.Aux != s { + if idx != x.Args[1] { break } - if p != x0.Args[0] { + x_2 := x.Args[2] + if x_2.Op != OpS390XSRDconst { break } - if idx != x0.Args[1] { + if x_2.AuxInt != j+32 { break } - if mem != x0.Args[2] { + if w != x_2.Args[0] { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + mem := x.Args[3] + if !(x.Uses == 1 && clobber(x)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMOVDstoreidx) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v.AddArg(idx) + v.AddArg(w0) + v.AddArg(mem) return true } - // match: (ORW or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y) s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) + return false +} +func rewriteValueS390X_OpS390XMULLD(v *Value) bool { + // match: (MULLD x (MOVDconst [c])) + // cond: is32Bit(c) + // result: (MULLDconst [c] x) for { - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + c := v_1.AuxInt + if !(is32Bit(c)) { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { + v.reset(OpS390XMULLDconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MULLD (MOVDconst [c]) x) + // cond: is32Bit(c) + // result: (MULLDconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op 
!= OpS390XMOVDconst { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + c := v_0.AuxInt + x := v.Args[1] + if !(is32Bit(c)) { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(OpS390XMULLDconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MULLD x g:(MOVDload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // result: (MULLDload [off] {sym} x ptr mem) + for { + t := v.Type + x := v.Args[0] + g := v.Args[1] + if g.Op != OpS390XMOVDload { break } - if idx != x0.Args[0] { + off := g.AuxInt + sym := g.Aux + ptr := g.Args[0] + mem := g.Args[1] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { break } - if p != x0.Args[1] { + v.reset(OpS390XMULLDload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MULLD g:(MOVDload [off] {sym} ptr mem) x) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // result: (MULLDload [off] {sym} x ptr mem) + for { + t := v.Type + g := v.Args[0] + if g.Op != OpS390XMOVDload { break } - if mem != x0.Args[2] { + off := g.AuxInt + sym := g.Aux + ptr := g.Args[0] + mem := g.Args[1] + x := v.Args[1] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + v.reset(OpS390XMULLDload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMULLDconst(v *Value) bool { + b := v.Block + _ = b + // match: (MULLDconst [-1] x) + // cond: + // result: (NEG x) + for { + if v.AuxInt != -1 { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + x := v.Args[0] + v.reset(OpS390XNEG) + v.AddArg(x) return true } - // match: (ORW or:(ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) y) s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) + // match: (MULLDconst [0] _) + // cond: + // result: (MOVDconst [0]) for { - or := v.Args[0] - if or.Op != OpS390XORW { + if v.AuxInt != 0 { break } - s1 := or.Args[0] - if s1.Op != OpS390XSLWconst { + v.reset(OpS390XMOVDconst) + v.AuxInt = 0 + return true + } + // match: (MULLDconst [1] x) + // cond: + // result: x + for { + if v.AuxInt != 1 { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MULLDconst [c] x) + // cond: isPowerOfTwo(c) + // result: (SLDconst [log2(c)] x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c)) { break } - i1 := x1.AuxInt - s := x1.Aux 
- idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - y := or.Args[1] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { + v.reset(OpS390XSLDconst) + v.AuxInt = log2(c) + v.AddArg(x) + return true + } + // match: (MULLDconst [c] x) + // cond: isPowerOfTwo(c+1) && c >= 15 + // result: (SUB (SLDconst [log2(c+1)] x) x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c+1) && c >= 15) { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + v.reset(OpS390XSUB) + v0 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v0.AuxInt = log2(c + 1) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULLDconst [c] x) + // cond: isPowerOfTwo(c-1) && c >= 17 + // result: (ADD (SLDconst [log2(c-1)] x) x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c-1) && c >= 17) { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(OpS390XADD) + v0 := b.NewValue0(v.Pos, OpS390XSLDconst, v.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULLDconst [c] (MOVDconst [d])) + // cond: + // result: (MOVDconst [c*d]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { break } - if idx != x0.Args[0] { + d := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = c * d + return true + } + return false +} +func rewriteValueS390X_OpS390XMULLW(v *Value) bool { + // match: (MULLW x (MOVDconst [c])) + // cond: + // result: (MULLWconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { break } - if p != x0.Args[1] { + c := v_1.AuxInt + v.reset(OpS390XMULLWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MULLW (MOVDconst [c]) x) + // cond: + // result: (MULLWconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { break } - if mem != x0.Args[2] { + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpS390XMULLWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MULLW x g:(MOVWload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // result: (MULLWload [off] {sym} x ptr mem) + for { + t := v.Type + x := v.Args[0] + g := v.Args[1] + if g.Op != OpS390XMOVWload { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + off := g.AuxInt + sym := g.Aux + ptr := g.Args[0] + mem := g.Args[1] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XMULLWload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (ORW or:(ORW y s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) 
(ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) + // match: (MULLW g:(MOVWload [off] {sym} ptr mem) x) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // result: (MULLWload [off] {sym} x ptr mem) for { - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { + t := v.Type + g := v.Args[0] + if g.Op != OpS390XMOVWload { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + off := g.AuxInt + sym := g.Aux + ptr := g.Args[0] + mem := g.Args[1] + x := v.Args[1] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { + v.reset(OpS390XMULLWload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MULLW x g:(MOVWZload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // result: (MULLWload [off] {sym} x ptr mem) + for { + t := v.Type + x := v.Args[0] + g := v.Args[1] + if g.Op != OpS390XMOVWZload { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + off := g.AuxInt + sym := g.Aux + ptr := g.Args[0] + mem := g.Args[1] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(OpS390XMULLWload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MULLW g:(MOVWZload [off] {sym} ptr mem) x) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // result: (MULLWload [off] {sym} x ptr mem) + for { + t := v.Type + g := v.Args[0] + if g.Op != OpS390XMOVWZload { break } - if idx != x0.Args[0] { + off := g.AuxInt + sym := g.Aux + ptr := g.Args[0] + mem := g.Args[1] + x := v.Args[1] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { break } - if p != x0.Args[1] { + v.reset(OpS390XMULLWload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMULLWconst(v *Value) bool { + b := v.Block + _ = b + // match: (MULLWconst [-1] x) + // cond: + // result: (NEGW x) + for { + if v.AuxInt != -1 { break } - if mem != x0.Args[2] { + x := v.Args[0] + v.reset(OpS390XNEGW) + v.AddArg(x) + return true + } + // match: (MULLWconst [0] _) + // cond: + // result: (MOVDconst [0]) + for { + if v.AuxInt != 0 { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + v.reset(OpS390XMOVDconst) + v.AuxInt = 0 + return true + } + // match: (MULLWconst [1] x) + // cond: + // result: x + for { + if v.AuxInt != 1 { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + x := v.Args[0] v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.Type = x.Type + v.AddArg(x) return true } - // match: (ORW or:(ORW y s1:(SLWconst [j1] 
x1:(MOVBZloadidx [i1] {s} idx p mem))) s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) + // match: (MULLWconst [c] x) + // cond: isPowerOfTwo(c) + // result: (SLWconst [log2(c)] x) for { - or := v.Args[0] - if or.Op != OpS390XORW { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c)) { break } - y := or.Args[0] - s1 := or.Args[1] - if s1.Op != OpS390XSLWconst { + v.reset(OpS390XSLWconst) + v.AuxInt = log2(c) + v.AddArg(x) + return true + } + // match: (MULLWconst [c] x) + // cond: isPowerOfTwo(c+1) && c >= 15 + // result: (SUBW (SLWconst [log2(c+1)] x) x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c+1) && c >= 15) { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + v.reset(OpS390XSUBW) + v0 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) + v0.AuxInt = log2(c + 1) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULLWconst [c] x) + // cond: isPowerOfTwo(c-1) && c >= 17 + // result: (ADDW (SLWconst [log2(c-1)] x) x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c-1) && c >= 17) { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - s0 := v.Args[1] - if s0.Op != OpS390XSLWconst { + v.reset(OpS390XADDW) + v0 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(x) + return true + } + // match: (MULLWconst [c] (MOVDconst [d])) + // cond: + // result: (MOVDconst [int64(int32(c*d))]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + d := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = int64(int32(c * d)) + return true + } + return false +} +func rewriteValueS390X_OpS390XNEG(v *Value) bool { + // match: (NEG (MOVDconst [c])) + // cond: + // result: (MOVDconst [-c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { break } - i0 := x0.AuxInt - if x0.Aux != s { + c := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = -c + return true + } + // match: (NEG (ADDconst [c] (NEG x))) + // cond: c != -(1<<31) + // result: (ADDconst [-c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { break } - if idx != x0.Args[0] { + c := v_0.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XNEG { break } - if p != x0.Args[1] { + x := v_0_0.Args[0] + if !(c != -(1 << 31)) { break } - if mem != x0.Args[2] { + v.reset(OpS390XADDconst) + v.AuxInt = -c + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XNEGW(v *Value) bool { + // match: (NEGW (MOVDconst [c])) + // cond: + // result: (MOVDconst [int64(int32(-c))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { break } - if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + c := v_0.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = int64(int32(-c)) + return true + } + return false +} +func rewriteValueS390X_OpS390XNOT(v *Value) bool { + b := v.Block + _ = b + types := 
&b.Func.Config.Types + _ = types + // match: (NOT x) + // cond: true + // result: (XOR (MOVDconst [-1]) x) + for { + x := v.Args[0] + if !(true) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) + v.reset(OpS390XXOR) + v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, types.UInt64) + v0.AuxInt = -1 v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j1 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, types.UInt16) - v2.AuxInt = i0 - v2.Aux = s - v2.AddArg(p) - v2.AddArg(idx) - v2.AddArg(mem) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XNOTW(v *Value) bool { + // match: (NOTW x) + // cond: true + // result: (XORWconst [-1] x) + for { + x := v.Args[0] + if !(true) { + break + } + v.reset(OpS390XXORWconst) + v.AuxInt = -1 + v.AddArg(x) return true } - // match: (ORW x0:(MOVBZload [i0] {s} p mem) sh:(SLWconst [8] x1:(MOVBZload [i1] {s} p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem)) + return false +} +func rewriteValueS390X_OpS390XOR(v *Value) bool { + b := v.Block + _ = b + types := &b.Func.Config.Types + _ = types + // match: (OR x (MOVDconst [c])) + // cond: isU32Bit(c) + // result: (ORconst [c] x) for { - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZload { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { + c := v_1.AuxInt + if !(isU32Bit(c)) { break } - if sh.AuxInt != 8 { + v.reset(OpS390XORconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (OR (MOVDconst [c]) x) + // cond: isU32Bit(c) + // result: (ORconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { break } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZload { + c := v_0.AuxInt + x := v.Args[1] + if !(isU32Bit(c)) { break } - i1 := x1.AuxInt - if x1.Aux != s { + v.reset(OpS390XORconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: ( OR (SLDconst x [c]) (SRDconst x [64-c])) + // cond: + // result: (RLLGconst [ c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XSLDconst { break } - if p != x1.Args[0] { + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XSRDconst { break } - if mem != x1.Args[1] { + if v_1.AuxInt != 64-c { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + if x != v_1.Args[0] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) + v.reset(OpS390XRLLGconst) + v.AuxInt = c + v.AddArg(x) return true } - // match: (ORW sh:(SLWconst [8] x1:(MOVBZload [i1] {s} p mem)) x0:(MOVBZload [i0] {s} p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem)) + // match: ( OR (SRDconst x [c]) (SLDconst x [64-c])) + // cond: + // result: (RLLGconst [64-c] x) for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { - 
break - } - if sh.AuxInt != 8 { + v_0 := v.Args[0] + if v_0.Op != OpS390XSRDconst { break } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZload { + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XSLDconst { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZload { + if v_1.AuxInt != 64-c { break } - i0 := x0.AuxInt - if x0.Aux != s { + if x != v_1.Args[0] { break } - if p != x0.Args[0] { + v.reset(OpS390XRLLGconst) + v.AuxInt = 64 - c + v.AddArg(x) + return true + } + // match: (OR (MOVDconst [c]) (MOVDconst [d])) + // cond: + // result: (MOVDconst [c|d]) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { break } - if mem != x0.Args[1] { + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + d := v_1.AuxInt + v.reset(OpS390XMOVDconst) + v.AuxInt = c | d + return true + } + // match: (OR x x) + // cond: + // result: x + for { + x := v.Args[0] + if x != v.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(mem) - v0.AddArg(v1) + v.Type = x.Type + v.AddArg(x) return true } - // match: (ORW r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem)) sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWBRload [i0] {s} p mem) + // match: (OR x g:(MOVDload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // result: (ORload [off] {sym} x ptr mem) for { - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { - break - } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRload { - break - } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { - break - } - if sh.AuxInt != 16 { - break - } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { + t := v.Type + x := v.Args[0] + g := v.Args[1] + if g.Op != OpS390XMOVDload { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRload { + off := g.AuxInt + sym := g.Aux + ptr := g.Args[0] + mem := g.Args[1] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { break } - i1 := x1.AuxInt - if x1.Aux != s { + v.reset(OpS390XORload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (OR g:(MOVDload [off] {sym} ptr mem) x) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // result: (ORload [off] {sym} x ptr mem) + for { + t := v.Type + g := v.Args[0] + if g.Op != OpS390XMOVDload { break } - if p != x1.Args[0] { + off := g.AuxInt + sym := g.Aux + ptr := g.Args[0] + mem := g.Args[1] + x := v.Args[1] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { break } - if mem != x1.Args[1] { + v.reset(OpS390XORload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (OR x:(SLDconst _) y) + // cond: y.Op != OpS390XSLDconst + // result: (OR 
y x) + for { + x := v.Args[0] + if x.Op != OpS390XSLDconst { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + y := v.Args[1] + if !(y.Op != OpS390XSLDconst) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRload, types.UInt32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) + v.reset(OpS390XOR) + v.AddArg(y) + v.AddArg(x) return true } - // match: (ORW sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))) r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWBRload [i0] {s} p mem) + // match: (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR x0:(MOVBZload [i] {s} p mem) s0:(SLDconst [8] x1:(MOVBZload [i+1] {s} p mem))) s1:(SLDconst [16] x2:(MOVBZload [i+2] {s} p mem))) s2:(SLDconst [24] x3:(MOVBZload [i+3] {s} p mem))) s3:(SLDconst [32] x4:(MOVBZload [i+4] {s} p mem))) s4:(SLDconst [40] x5:(MOVBZload [i+5] {s} p mem))) s5:(SLDconst [48] x6:(MOVBZload [i+6] {s} p mem))) s6:(SLDconst [56] x7:(MOVBZload [i+7] {s} p mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRload [i] {s} p mem) for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { + o0 := v.Args[0] + if o0.Op != OpS390XOR { break } - if sh.AuxInt != 16 { + o1 := o0.Args[0] + if o1.Op != OpS390XOR { break } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { + o2 := o1.Args[0] + if o2.Op != OpS390XOR { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRload { + o3 := o2.Args[0] + if o3.Op != OpS390XOR { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { + o4 := o3.Args[0] + if o4.Op != OpS390XOR { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRload { + o5 := o4.Args[0] + if o5.Op != OpS390XOR { break } - i0 := x0.AuxInt - if x0.Aux != s { + x0 := o5.Args[0] + if x0.Op != OpS390XMOVBZload { break } - if p != x0.Args[0] { + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := o5.Args[1] + if s0.Op != OpS390XSLDconst { break } - if mem != x0.Args[1] { + if s0.AuxInt != 8 { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZload { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRload, types.UInt32) 
- v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(mem) - return true - } - // match: (ORW s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem)) or:(ORW s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) - for { - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { + if x1.AuxInt != i+1 { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { + if x1.Aux != s { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { + if p != x1.Args[0] { break } - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { + if mem != x1.Args[1] { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { + s1 := o4.Args[1] + if s1.Op != OpS390XSLDconst { break } - i0 := x0.AuxInt - if x0.Aux != s { + if s1.AuxInt != 16 { break } - if p != x0.Args[0] { + x2 := s1.Args[0] + if x2.Op != OpS390XMOVBZload { break } - if mem != x0.Args[1] { + if x2.AuxInt != i+2 { break } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if x2.Aux != s { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem)) or:(ORW y s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) - for { - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { + if p != x2.Args[0] { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { + if mem != x2.Args[1] { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - mem := x1.Args[1] - or := v.Args[1] - if or.Op != OpS390XORW { + s2 := o3.Args[1] + if s2.Op != OpS390XSLDconst { break } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLWconst { + if s2.AuxInt != 24 { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { + x3 := s2.Args[0] + if x3.Op != OpS390XMOVBZload { break } - i0 := x0.AuxInt - if x0.Aux != s { + if x3.AuxInt != i+3 { break } - if p != x0.Args[0] { + if x3.Aux != s { break } - if mem != x0.Args[1] { + if p != x3.Args[0] { break } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if mem != x3.Args[1] { break } - b = 
mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem)) y) s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) - for { - or := v.Args[0] - if or.Op != OpS390XORW { + s3 := o2.Args[1] + if s3.Op != OpS390XSLDconst { break } - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { + if s3.AuxInt != 32 { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { + x4 := s3.Args[0] + if x4.Op != OpS390XMOVBZload { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { + if x4.AuxInt != i+4 { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { + if x4.Aux != s { break } - i1 := x1.AuxInt - if x1.Aux != s { + if p != x4.Args[0] { break } - if p != x1.Args[0] { + if mem != x4.Args[1] { break } - if mem != x1.Args[1] { + s4 := o1.Args[1] + if s4.Op != OpS390XSLDconst { break } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if s4.AuxInt != 40 { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW y s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem))) s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) - for { - or := v.Args[0] - if or.Op != OpS390XORW { + x5 := s4.Args[0] + if x5.Op != OpS390XMOVBZload { + break + } + if x5.AuxInt != i+5 { break } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLWconst { + if x5.Aux != s { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZload { + if p != x5.Args[0] { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - mem := x0.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { + if mem != x5.Args[1] { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZload { + s5 := o0.Args[1] + if s5.Op != OpS390XSLDconst { break } - i1 := x1.AuxInt - if x1.Aux != s { + if s5.AuxInt != 48 { break } 
- if p != x1.Args[0] { + x6 := s5.Args[0] + if x6.Op != OpS390XMOVBZload { break } - if mem != x1.Args[1] { + if x6.AuxInt != i+6 { break } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if x6.Aux != s { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW x0:(MOVBZloadidx [i0] {s} p idx mem) sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if p != x6.Args[0] { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { + if mem != x6.Args[1] { break } - if sh.AuxInt != 8 { + s6 := v.Args[1] + if s6.Op != OpS390XSLDconst { break } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + if s6.AuxInt != 56 { break } - i1 := x1.AuxInt - if x1.Aux != s { + x7 := s6.Args[0] + if x7.Op != OpS390XMOVBZload { break } - if p != x1.Args[0] { + if x7.AuxInt != i+7 { break } - if idx != x1.Args[1] { + if x7.Aux != s { break } - if mem != x1.Args[2] { + if p != x7.Args[0] { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + if mem != x7.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpS390XMOVDBRload, types.UInt64) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v0.AuxInt = i + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) return true } - // match: (ORW x0:(MOVBZloadidx [i0] {s} idx p mem) sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg 
(MOVHBRloadidx [i0] {s} p idx mem)) + // match: (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR x0:(MOVBZloadidx [i] {s} p idx mem) s0:(SLDconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem))) s1:(SLDconst [16] x2:(MOVBZloadidx [i+2] {s} p idx mem))) s2:(SLDconst [24] x3:(MOVBZloadidx [i+3] {s} p idx mem))) s3:(SLDconst [32] x4:(MOVBZloadidx [i+4] {s} p idx mem))) s4:(SLDconst [40] x5:(MOVBZloadidx [i+5] {s} p idx mem))) s5:(SLDconst [48] x6:(MOVBZloadidx [i+6] {s} p idx mem))) s6:(SLDconst [56] x7:(MOVBZloadidx [i+7] {s} p idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDBRloadidx [i] {s} p idx mem) for { - x0 := v.Args[0] + o0 := v.Args[0] + if o0.Op != OpS390XOR { + break + } + o1 := o0.Args[0] + if o1.Op != OpS390XOR { + break + } + o2 := o1.Args[0] + if o2.Op != OpS390XOR { + break + } + o3 := o2.Args[0] + if o3.Op != OpS390XOR { + break + } + o4 := o3.Args[0] + if o4.Op != OpS390XOR { + break + } + o5 := o4.Args[0] + if o5.Op != OpS390XOR { + break + } + x0 := o5.Args[0] if x0.Op != OpS390XMOVBZloadidx { break } - i0 := x0.AuxInt + i := x0.AuxInt s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] + p := x0.Args[0] + idx := x0.Args[1] mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { + s0 := o5.Args[1] + if s0.Op != OpS390XSLDconst { break } - if sh.AuxInt != 8 { + if s0.AuxInt != 8 { break } - x1 := sh.Args[0] + x1 := s0.Args[0] if x1.Op != OpS390XMOVBZloadidx { break } - i1 := x1.AuxInt + if x1.AuxInt != i+1 { + break + } if x1.Aux != s { break } @@ -30630,1381 +15238,1059 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if mem != x1.Args[2] { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + s1 := o4.Args[1] + if s1.Op != OpS390XSLDconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORW x0:(MOVBZloadidx [i0] {s} p idx mem) sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if s1.AuxInt != 16 { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { + x2 := s1.Args[0] + if x2.Op != OpS390XMOVBZloadidx { break } - if sh.AuxInt != 8 { + if x2.AuxInt != i+2 { break } - x1 := sh.Args[0] - if x1.Op != 
OpS390XMOVBZloadidx { + if x2.Aux != s { break } - i1 := x1.AuxInt - if x1.Aux != s { + if p != x2.Args[0] { break } - if idx != x1.Args[0] { + if idx != x2.Args[1] { break } - if p != x1.Args[1] { + if mem != x2.Args[2] { break } - if mem != x1.Args[2] { + s2 := o3.Args[1] + if s2.Op != OpS390XSLDconst { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + if s2.AuxInt != 24 { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORW x0:(MOVBZloadidx [i0] {s} idx p mem) sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - x0 := v.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + x3 := s2.Args[0] + if x3.Op != OpS390XMOVBZloadidx { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { + if x3.AuxInt != i+3 { break } - if sh.AuxInt != 8 { + if x3.Aux != s { break } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + if p != x3.Args[0] { break } - i1 := x1.AuxInt - if x1.Aux != s { + if idx != x3.Args[1] { break } - if idx != x1.Args[0] { + if mem != x3.Args[2] { break } - if p != x1.Args[1] { + s3 := o2.Args[1] + if s3.Op != OpS390XSLDconst { break } - if mem != x1.Args[2] { + if s3.AuxInt != 32 { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + x4 := s3.Args[0] + if x4.Op != OpS390XMOVBZloadidx { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORW sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem)) x0:(MOVBZloadidx [i0] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { + if x4.AuxInt != i+4 { break } - if sh.AuxInt != 8 { + if x4.Aux != s { break } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + if p != x4.Args[0] { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZloadidx { + if idx != x4.Args[1] { break } - i0 := x0.AuxInt - if x0.Aux != s { + if mem != x4.Args[2] { break } - if p != x0.Args[0] { + s4 := o1.Args[1] + if s4.Op != OpS390XSLDconst { break } - if idx != x0.Args[1] { + if s4.AuxInt != 40 { break } - if mem != x0.Args[2] { + x5 := s4.Args[0] + if x5.Op != OpS390XMOVBZloadidx { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + if x5.AuxInt != i+5 { break } - b = mergePoint(b, 
x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORW sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} idx p mem)) x0:(MOVBZloadidx [i0] {s} p idx mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { + if x5.Aux != s { break } - if sh.AuxInt != 8 { + if p != x5.Args[0] { break } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + if idx != x5.Args[1] { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZloadidx { + if mem != x5.Args[2] { break } - i0 := x0.AuxInt - if x0.Aux != s { + s5 := o0.Args[1] + if s5.Op != OpS390XSLDconst { break } - if p != x0.Args[0] { + if s5.AuxInt != 48 { break } - if idx != x0.Args[1] { + x6 := s5.Args[0] + if x6.Op != OpS390XMOVBZloadidx { break } - if mem != x0.Args[2] { + if x6.AuxInt != i+6 { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + if x6.Aux != s { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORW sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} p idx mem)) x0:(MOVBZloadidx [i0] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { + if p != x6.Args[0] { break } - if sh.AuxInt != 8 { + if idx != x6.Args[1] { break } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + if mem != x6.Args[2] { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZloadidx { + s6 := v.Args[1] + if s6.Op != OpS390XSLDconst { break } - i0 := x0.AuxInt - if x0.Aux != s { + if s6.AuxInt != 56 { break } - if idx != x0.Args[0] { + x7 := s6.Args[0] + if x7.Op != OpS390XMOVBZloadidx { break } - if p != x0.Args[1] { + if x7.AuxInt != i+7 { break } - if mem != x0.Args[2] { + if x7.Aux != s { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + if p != x7.Args[0] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) + if idx != x7.Args[1] { + break + } + if mem != x7.Args[2] { + break + } + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) 
&& clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpS390XMOVDBRloadidx, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v1.AuxInt = i0 - v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) + v0.AuxInt = i + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) return true } - // match: (ORW sh:(SLWconst [8] x1:(MOVBZloadidx [i1] {s} idx p mem)) x0:(MOVBZloadidx [i0] {s} idx p mem)) - // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) + // match: (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR x0:(MOVBZload [i] {s} p mem) s0:(SLDconst [8] x1:(MOVBZload [i-1] {s} p mem))) s1:(SLDconst [16] x2:(MOVBZload [i-2] {s} p mem))) s2:(SLDconst [24] x3:(MOVBZload [i-3] {s} p mem))) s3:(SLDconst [32] x4:(MOVBZload [i-4] {s} p mem))) s4:(SLDconst [40] x5:(MOVBZload [i-5] {s} p mem))) s5:(SLDconst [48] x6:(MOVBZload [i-6] {s} p mem))) s6:(SLDconst [56] x7:(MOVBZload [i-7] {s} p mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload [i-7] {s} p mem) for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { + o0 := v.Args[0] + if o0.Op != OpS390XOR { break } - if sh.AuxInt != 8 { + o1 := o0.Args[0] + if o1.Op != OpS390XOR { break } - x1 := sh.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + o2 := o1.Args[0] + if o2.Op != OpS390XOR { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - x0 := v.Args[1] - if x0.Op != OpS390XMOVBZloadidx { + o3 := o2.Args[0] + if o3.Op != OpS390XOR { break } - i0 := x0.AuxInt - if x0.Aux != s { + o4 := o3.Args[0] + if o4.Op != OpS390XOR { break } - if idx != x0.Args[0] { + o5 := o4.Args[0] + if o5.Op != OpS390XOR { break } - if p != x0.Args[1] { + x0 := o5.Args[0] + if x0.Op != OpS390XMOVBZload { break } - if mem != x0.Args[2] { + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := o5.Args[1] + if s0.Op != OpS390XSLDconst { break } - if !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(sh)) { + if s0.AuxInt != 8 { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v1.AuxInt = i0 - 
v1.Aux = s - v1.AddArg(p) - v1.AddArg(idx) - v1.AddArg(mem) - v0.AddArg(v1) - return true - } - // match: (ORW r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)) sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) - for { - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZload { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + if x1.AuxInt != i-1 { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { + if x1.Aux != s { + break + } + if p != x1.Args[0] { break } - if sh.AuxInt != 16 { + if mem != x1.Args[1] { break } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { + s1 := o4.Args[1] + if s1.Op != OpS390XSLDconst { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + if s1.AuxInt != 16 { break } - i1 := x1.AuxInt - if x1.Aux != s { + x2 := s1.Args[0] + if x2.Op != OpS390XMOVBZload { break } - if p != x1.Args[0] { + if x2.AuxInt != i-2 { break } - if idx != x1.Args[1] { + if x2.Aux != s { break } - if mem != x1.Args[2] { + if p != x2.Args[0] { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + if mem != x2.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem)) sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) - for { - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { + s2 := o3.Args[1] + if s2.Op != OpS390XSLDconst { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + if s2.AuxInt != 24 { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { + x3 := s2.Args[0] + if x3.Op != OpS390XMOVBZload { break } - if sh.AuxInt != 16 { + if x3.AuxInt != i-3 { break } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { + if x3.Aux != s { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + if p != x3.Args[0] { break } - i1 := x1.AuxInt - if x1.Aux != s { + if mem != x3.Args[1] { break } - if p != x1.Args[0] { + s3 := o2.Args[1] + if s3.Op != OpS390XSLDconst { break } - if idx != x1.Args[1] { + if s3.AuxInt != 32 { break } - if mem != x1.Args[2] { + x4 := s3.Args[0] + if x4.Op != OpS390XMOVBZload { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + if x4.AuxInt != i-4 { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, 
types.Int32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem)) sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) - for { - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { + if x4.Aux != s { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + if p != x4.Args[0] { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { + if mem != x4.Args[1] { break } - if sh.AuxInt != 16 { + s4 := o1.Args[1] + if s4.Op != OpS390XSLDconst { break } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { + if s4.AuxInt != 40 { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + x5 := s4.Args[0] + if x5.Op != OpS390XMOVBZload { break } - i1 := x1.AuxInt - if x1.Aux != s { + if x5.AuxInt != i-5 { break } - if idx != x1.Args[0] { + if x5.Aux != s { break } - if p != x1.Args[1] { + if p != x5.Args[0] { break } - if mem != x1.Args[2] { + if mem != x5.Args[1] { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + s5 := o0.Args[1] + if s5.Op != OpS390XSLDconst { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem)) sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem)))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) - for { - r0 := v.Args[0] - if r0.Op != OpS390XMOVHZreg { + if s5.AuxInt != 48 { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + x6 := s5.Args[0] + if x6.Op != OpS390XMOVBZload { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - sh := v.Args[1] - if sh.Op != OpS390XSLWconst { + if x6.AuxInt != i-6 { break } - if sh.AuxInt != 16 { + if x6.Aux != s { break } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { + if p != x6.Args[0] { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + if mem != x6.Args[1] { break } - i1 := x1.AuxInt - if x1.Aux != s { + s6 := v.Args[1] + if s6.Op != OpS390XSLDconst { break } - if idx != x1.Args[0] { + if s6.AuxInt != 56 { break } - if p != x1.Args[1] { + x7 := s6.Args[0] + if x7.Op != OpS390XMOVBZload { break } - if mem != x1.Args[2] { + if x7.AuxInt != i-7 { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + if x7.Aux != s { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) + if p != 
x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpS390XMOVDload, types.UInt64) v.reset(OpCopy) v.AddArg(v0) - v0.AuxInt = i0 + v0.AuxInt = i - 7 v0.Aux = s v0.AddArg(p) - v0.AddArg(idx) v0.AddArg(mem) return true } - // match: (ORW sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) + // match: (OR o0:(OR o1:(OR o2:(OR o3:(OR o4:(OR o5:(OR x0:(MOVBZloadidx [i] {s} p idx mem) s0:(SLDconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem))) s1:(SLDconst [16] x2:(MOVBZloadidx [i-2] {s} p idx mem))) s2:(SLDconst [24] x3:(MOVBZloadidx [i-3] {s} p idx mem))) s3:(SLDconst [32] x4:(MOVBZloadidx [i-4] {s} p idx mem))) s4:(SLDconst [40] x5:(MOVBZloadidx [i-5] {s} p idx mem))) s5:(SLDconst [48] x6:(MOVBZloadidx [i-6] {s} p idx mem))) s6:(SLDconst [56] x7:(MOVBZloadidx [i-7] {s} p idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx [i-7] {s} p idx mem) for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { + o0 := v.Args[0] + if o0.Op != OpS390XOR { break } - if sh.AuxInt != 16 { + o1 := o0.Args[0] + if o1.Op != OpS390XOR { break } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { + o2 := o1.Args[0] + if o2.Op != OpS390XOR { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + o3 := o2.Args[0] + if o3.Op != OpS390XOR { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { + o4 := o3.Args[0] + if o4.Op != OpS390XOR { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + o5 := o4.Args[0] + if o5.Op != OpS390XOR { break } - i0 := x0.AuxInt - if x0.Aux != s { + x0 := o5.Args[0] + if x0.Op != OpS390XMOVBZloadidx { 
break } - if p != x0.Args[0] { + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + s0 := o5.Args[1] + if s0.Op != OpS390XSLDconst { break } - if idx != x0.Args[1] { + if s0.AuxInt != 8 { break } - if mem != x0.Args[2] { + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZloadidx { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + if x1.AuxInt != i-1 { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { + if x1.Aux != s { break } - if sh.AuxInt != 16 { + if p != x1.Args[0] { break } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { + if idx != x1.Args[1] { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + if mem != x1.Args[2] { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { + s1 := o4.Args[1] + if s1.Op != OpS390XSLDconst { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + if s1.AuxInt != 16 { break } - i0 := x0.AuxInt - if x0.Aux != s { + x2 := s1.Args[0] + if x2.Op != OpS390XMOVBZloadidx { break } - if p != x0.Args[0] { + if x2.AuxInt != i-2 { break } - if idx != x0.Args[1] { + if x2.Aux != s { break } - if mem != x0.Args[2] { + if p != x2.Args[0] { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + if idx != x2.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem))) r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { + if mem != x2.Args[2] { break } - if sh.AuxInt != 16 { + s2 := o3.Args[1] + if s2.Op != OpS390XSLDconst { break } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { + if s2.AuxInt != 24 { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + x3 := s2.Args[0] + if x3.Op != OpS390XMOVBZloadidx { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { + if x3.AuxInt != i-3 { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + if x3.Aux != 
s { break } - i0 := x0.AuxInt - if x0.Aux != s { + if p != x3.Args[0] { break } - if idx != x0.Args[0] { + if idx != x3.Args[1] { break } - if p != x0.Args[1] { + if mem != x3.Args[2] { break } - if mem != x0.Args[2] { + s3 := o2.Args[1] + if s3.Op != OpS390XSLDconst { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + if s3.AuxInt != 32 { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))) - // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) - // result: @mergePoint(b,x0,x1) (MOVWBRloadidx [i0] {s} p idx mem) - for { - sh := v.Args[0] - if sh.Op != OpS390XSLWconst { + x4 := s3.Args[0] + if x4.Op != OpS390XMOVBZloadidx { break } - if sh.AuxInt != 16 { + if x4.AuxInt != i-4 { break } - r1 := sh.Args[0] - if r1.Op != OpS390XMOVHZreg { + if x4.Aux != s { break } - x1 := r1.Args[0] - if x1.Op != OpS390XMOVHBRloadidx { + if p != x4.Args[0] { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - r0 := v.Args[1] - if r0.Op != OpS390XMOVHZreg { + if idx != x4.Args[1] { break } - x0 := r0.Args[0] - if x0.Op != OpS390XMOVHBRloadidx { + if mem != x4.Args[2] { break } - i0 := x0.AuxInt - if x0.Aux != s { + s4 := o1.Args[1] + if s4.Op != OpS390XSLDconst { break } - if idx != x0.Args[0] { + if s4.AuxInt != 40 { break } - if p != x0.Args[1] { + x5 := s4.Args[0] + if x5.Op != OpS390XMOVBZloadidx { break } - if mem != x0.Args[2] { + if x5.AuxInt != i-5 { break } - if !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh)) { + if x5.Aux != s { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, types.Int32) - v.reset(OpCopy) - v.AddArg(v0) - v0.AuxInt = i0 - v0.Aux = s - v0.AddArg(p) - v0.AddArg(idx) - v0.AddArg(mem) - return true - } - // match: (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { + if p != x5.Args[0] { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + if idx != x5.Args[1] { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XORW { + if mem != x5.Args[2] { break } - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { + s5 := o0.Args[1] + if s5.Op != OpS390XSLDconst { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if s5.AuxInt != 
48 { break } - i0 := x0.AuxInt - if x0.Aux != s { + x6 := s5.Args[0] + if x6.Op != OpS390XMOVBZloadidx { break } - if p != x0.Args[0] { + if x6.AuxInt != i-6 { break } - if idx != x0.Args[1] { + if x6.Aux != s { break } - if mem != x0.Args[2] { + if p != x6.Args[0] { break } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if idx != x6.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { + if mem != x6.Args[2] { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + s6 := v.Args[1] + if s6.Op != OpS390XSLDconst { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XORW { + if s6.AuxInt != 56 { break } - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { + x7 := s6.Args[0] + if x7.Op != OpS390XMOVBZloadidx { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if x7.AuxInt != i-7 { break } - i0 := x0.AuxInt - if x0.Aux != s { + if x7.Aux != s { break } - if p != x0.Args[0] { + if p != x7.Args[0] { break } - if idx != x0.Args[1] { + if idx != x7.Args[1] { break } - if mem != x0.Args[2] { + if mem != x7.Args[2] { break } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpS390XMOVDloadidx, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := 
b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = i - 7 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) return true } - // match: (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + return false +} +func rewriteValueS390X_OpS390XORW(v *Value) bool { + b := v.Block + _ = b + types := &b.Func.Config.Types + _ = types + // match: (ORW x (MOVDconst [c])) + // cond: + // result: (ORWconst [c] x) for { - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + c := v_1.AuxInt + v.reset(OpS390XORWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ORW (MOVDconst [c]) x) + // cond: + // result: (ORWconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XORW { + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpS390XORWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: ( ORW (SLWconst x [c]) (SRWconst x [32-c])) + // cond: + // result: (RLLconst [ c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XSLWconst { break } - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XSRWconst { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if v_1.AuxInt != 32-c { + break + } + if x != v_1.Args[0] { break } - i0 := x0.AuxInt - if x0.Aux != s { + v.reset(OpS390XRLLconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: ( ORW (SRWconst x [c]) (SLWconst x [32-c])) + // cond: + // result: (RLLconst [32-c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XSRWconst { break } - if idx != x0.Args[0] { + c := v_0.AuxInt + x := v_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XSLWconst { break } - if p != x0.Args[1] { + if v_1.AuxInt != 32-c { break } - if mem != x0.Args[2] { + if x != v_1.Args[0] { break } - y := or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + v.reset(OpS390XRLLconst) + v.AuxInt = 32 - c + v.AddArg(x) + return true + } + // match: (ORW x x) + // cond: + // result: x + for { + x := v.Args[0] + if x != v.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - 
v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.Type = x.Type + v.AddArg(x) return true } - // match: (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) y)) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + // match: (ORW x g:(MOVWload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // result: (ORWload [off] {sym} x ptr mem) for { - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { + t := v.Type + x := v.Args[0] + g := v.Args[1] + if g.Op != OpS390XMOVWload { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + off := g.AuxInt + sym := g.Aux + ptr := g.Args[0] + mem := g.Args[1] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XORW { + v.reset(OpS390XORWload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (ORW g:(MOVWload [off] {sym} ptr mem) x) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // result: (ORWload [off] {sym} x ptr mem) + for { + t := v.Type + g := v.Args[0] + if g.Op != OpS390XMOVWload { break } - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { + off := g.AuxInt + sym := g.Aux + ptr := g.Args[0] + mem := g.Args[1] + x := v.Args[1] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + v.reset(OpS390XORWload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (ORW x g:(MOVWZload [off] {sym} ptr mem)) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // result: (ORWload [off] {sym} x ptr mem) + for { + t := v.Type + x := v.Args[0] + g := v.Args[1] + if g.Op != OpS390XMOVWZload { break } - i0 := x0.AuxInt - if x0.Aux != s { + off := g.AuxInt + sym := g.Aux + ptr := g.Args[0] + mem := g.Args[1] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { break } - if idx != x0.Args[0] { + v.reset(OpS390XORWload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (ORW g:(MOVWZload [off] {sym} ptr mem) x) + // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) + // result: (ORWload [off] {sym} x ptr mem) + for { + t := v.Type + g := v.Args[0] + if g.Op != OpS390XMOVWZload { break } - if p != x0.Args[1] { + off := g.AuxInt + sym := g.Aux + ptr := g.Args[0] + mem := g.Args[1] + x := v.Args[1] + if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { break } - if mem != x0.Args[2] { + v.reset(OpS390XORWload) + v.Type = t + v.AuxInt = off + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (ORW x:(SLWconst _) y) + // cond: y.Op != OpS390XSLWconst + // result: (ORW y x) + for { + x := v.Args[0] + if x.Op != OpS390XSLWconst { break } - y := 
or.Args[1] - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + y := v.Args[1] + if !(y.Op != OpS390XSLWconst) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v.reset(OpS390XORW) + v.AddArg(y) + v.AddArg(x) return true } - // match: (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) or:(ORW y s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + // match: (ORW x0:(MOVBZload [i] {s} p mem) s0:(SLWconst [8] x1:(MOVBZload [i+1] {s} p mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) + // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i] {s} p mem)) for { - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + x0 := v.Args[0] + if x0.Op != OpS390XMOVBZload { break } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XORW { + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := v.Args[1] + if s0.Op != OpS390XSLWconst { break } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLWconst { + if s0.AuxInt != 8 { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZload { break } - i0 := x0.AuxInt - if x0.Aux != s { + if x1.AuxInt != i+1 { break } - if p != x0.Args[0] { + if x1.Aux != s { break } - if idx != x0.Args[1] { + if p != x1.Args[0] { break } - if mem != x0.Args[2] { + if mem != x1.Args[1] { break } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) + v1 := b.NewValue0(v.Pos, OpS390XMOVHBRload, types.UInt16) + v1.AuxInt = i + v1.Aux = s + v1.AddArg(p) + v1.AddArg(mem) v0.AddArg(v1) - v0.AddArg(y) return true } - // 
match: (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(ORW y s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + // match: (ORW o0:(ORW z0:(MOVHZreg x0:(MOVHBRload [i] {s} p mem)) s0:(SLWconst [16] x1:(MOVBZload [i+2] {s} p mem))) s1:(SLWconst [24] x2:(MOVBZload [i+3] {s} p mem))) + // cond: p.Op != OpSB && z0.Uses == 1 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(z0) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (MOVWBRload [i] {s} p mem) for { - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { + o0 := v.Args[0] + if o0.Op != OpS390XORW { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + z0 := o0.Args[0] + if z0.Op != OpS390XMOVHZreg { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XORW { + x0 := z0.Args[0] + if x0.Op != OpS390XMOVHBRload { break } - y := or.Args[0] - s0 := or.Args[1] + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := o0.Args[1] if s0.Op != OpS390XSLWconst { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if s0.AuxInt != 16 { break } - i0 := x0.AuxInt - if x0.Aux != s { + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZload { break } - if p != x0.Args[0] { + if x1.AuxInt != i+2 { break } - if idx != x0.Args[1] { + if x1.Aux != s { break } - if mem != x0.Args[2] { + if p != x1.Args[0] { break } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if mem != x1.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) or:(ORW y s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - s1 := v.Args[0] + s1 := v.Args[1] if s1.Op != OpS390XSLWconst { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - s := x1.Aux - p := x1.Args[0] - idx := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XORW { - break - } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLWconst { + if 
s1.AuxInt != 24 { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + x2 := s1.Args[0] + if x2.Op != OpS390XMOVBZload { break } - i0 := x0.AuxInt - if x0.Aux != s { + if x2.AuxInt != i+3 { break } - if idx != x0.Args[0] { + if x2.Aux != s { break } - if p != x0.Args[1] { + if p != x2.Args[0] { break } - if mem != x0.Args[2] { + if mem != x2.Args[1] { break } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if !(p.Op != OpSB && z0.Uses == 1 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(z0) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(v.Pos, OpS390XMOVWBRload, types.UInt32) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = i + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) return true } - // match: (ORW s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(ORW y s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + // match: (ORW x0:(MOVBZloadidx [i] {s} p idx mem) s0:(SLWconst [8] x1:(MOVBZloadidx [i+1] {s} p idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) + // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i] {s} p idx mem)) for { - s1 := v.Args[0] - if s1.Op != OpS390XSLWconst { + x0 := v.Args[0] + if x0.Op != OpS390XMOVBZloadidx { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + s0 := v.Args[1] + if s0.Op != OpS390XSLWconst { break } - i1 := x1.AuxInt - s := x1.Aux - idx := x1.Args[0] - p := x1.Args[1] - mem := x1.Args[2] - or := v.Args[1] - if or.Op != OpS390XORW { + if s0.AuxInt != 8 { break } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLWconst { + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZloadidx { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if x1.AuxInt != i+1 { break } - i0 := x0.AuxInt - if x0.Aux != s { + if x1.Aux != s { break } - if idx != x0.Args[0] { + if p != x1.Args[0] { break } - if p != x0.Args[1] { + if idx != x1.Args[1] { break } - if mem != x0.Args[2] { + if mem != x1.Args[2] { break } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if !(x0.Uses == 1 && 
x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) + v1 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, v.Type) + v1.AuxInt = i + v1.Aux = s + v1.AddArg(p) + v1.AddArg(idx) + v1.AddArg(mem) v0.AddArg(v1) - v0.AddArg(y) return true } - // match: (ORW or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y) s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + // match: (ORW o0:(ORW z0:(MOVHZreg x0:(MOVHBRloadidx [i] {s} p idx mem)) s0:(SLWconst [16] x1:(MOVBZloadidx [i+2] {s} p idx mem))) s1:(SLWconst [24] x2:(MOVBZloadidx [i+3] {s} p idx mem))) + // cond: z0.Uses == 1 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(z0) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (MOVWZreg (MOVWBRloadidx [i] {s} p idx mem)) for { - or := v.Args[0] - if or.Op != OpS390XORW { + o0 := v.Args[0] + if o0.Op != OpS390XORW { break } - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { + z0 := o0.Args[0] + if z0.Op != OpS390XMOVHZreg { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + x0 := z0.Args[0] + if x0.Op != OpS390XMOVHBRloadidx { break } - i0 := x0.AuxInt + i := x0.AuxInt s := x0.Aux p := x0.Args[0] idx := x0.Args[1] mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { + s0 := o0.Args[1] + if s0.Op != OpS390XSLWconst { + break + } + if s0.AuxInt != 16 { break } - j1 := s1.AuxInt - x1 := s1.Args[0] + x1 := s0.Args[0] if x1.Op != OpS390XMOVBZloadidx { break } - i1 := x1.AuxInt + if x1.AuxInt != i+2 { + break + } if x1.Aux != s { break } @@ -32017,495 +16303,305 @@ func rewriteValueS390X_OpS390XORW(v *Value) bool { if mem != x1.Args[2] { break } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { - break - } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) y) s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && 
j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpS390XORW { - break - } - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { + s1 := v.Args[1] + if s1.Op != OpS390XSLWconst { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if s1.AuxInt != 24 { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { + x2 := s1.Args[0] + if x2.Op != OpS390XMOVBZloadidx { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + if x2.AuxInt != i+3 { break } - i1 := x1.AuxInt - if x1.Aux != s { + if x2.Aux != s { break } - if p != x1.Args[0] { + if p != x2.Args[0] { break } - if idx != x1.Args[1] { + if idx != x2.Args[1] { break } - if mem != x1.Args[2] { + if mem != x2.Args[2] { break } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if !(z0.Uses == 1 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(z0) && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, types.UInt64) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) + v1 := b.NewValue0(v.Pos, OpS390XMOVWBRloadidx, v.Type) + v1.AuxInt = i + v1.Aux = s + v1.AddArg(p) + v1.AddArg(idx) + v1.AddArg(mem) v0.AddArg(v1) - v0.AddArg(y) return true } - // match: (ORW or:(ORW y s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + // match: (ORW x0:(MOVBZload [i] {s} p mem) s0:(SLWconst [8] x1:(MOVBZload [i-1] {s} p mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) + // result: @mergePoint(b,x0,x1) (MOVHZload [i-1] {s} p mem) for { - or := v.Args[0] - if or.Op != OpS390XORW { + x0 := v.Args[0] + if x0.Op != OpS390XMOVBZload { break } - y := or.Args[0] - s0 := or.Args[1] + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + mem := x0.Args[1] + s0 := v.Args[1] if s0.Op != OpS390XSLWconst { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if s0.AuxInt != 8 { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem 
:= x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZload { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + if x1.AuxInt != i-1 { break } - i1 := x1.AuxInt if x1.Aux != s { break } if p != x1.Args[0] { break } - if idx != x1.Args[1] { - break - } - if mem != x1.Args[2] { + if mem != x1.Args[1] { break } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, types.UInt16) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = i - 1 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) return true } - // match: (ORW or:(ORW y s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + // match: (ORW o0:(ORW x0:(MOVHZload [i] {s} p mem) s0:(SLWconst [16] x1:(MOVBZload [i-1] {s} p mem))) s1:(SLWconst [24] x2:(MOVBZload [i-2] {s} p mem))) + // cond: p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (MOVWZload [i-2] {s} p mem) for { - or := v.Args[0] - if or.Op != OpS390XORW { + o0 := v.Args[0] + if o0.Op != OpS390XORW { break } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + x0 := o0.Args[0] + if x0.Op != OpS390XMOVHZload { break } - i0 := x0.AuxInt + i := x0.AuxInt s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { - break - } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { - break - } - i1 := x1.AuxInt - if x1.Aux != s { - break - } - if p != x1.Args[0] { + p := x0.Args[0] + mem := x0.Args[1] + s0 := o0.Args[1] + if s0.Op != OpS390XSLWconst { break } - if idx != x1.Args[1] { + if s0.AuxInt != 16 { break } - if mem != x1.Args[2] { + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZload { break } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if x1.AuxInt != i-1 { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - 
v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y) s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpS390XORW { + if x1.Aux != s { break } - s0 := or.Args[0] - if s0.Op != OpS390XSLWconst { + if p != x1.Args[0] { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if mem != x1.Args[1] { break } - i0 := x0.AuxInt - s := x0.Aux - p := x0.Args[0] - idx := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] s1 := v.Args[1] if s1.Op != OpS390XSLWconst { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + if s1.AuxInt != 24 { break } - i1 := x1.AuxInt - if x1.Aux != s { + x2 := s1.Args[0] + if x2.Op != OpS390XMOVBZload { break } - if idx != x1.Args[0] { + if x2.AuxInt != i-2 { break } - if p != x1.Args[1] { + if x2.Aux != s { break } - if mem != x1.Args[2] { + if p != x2.Args[0] { break } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if mem != x2.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + if !(p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + break + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, types.UInt32) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = i - 2 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) return true } - // match: (ORW or:(ORW s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) y) s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + // match: (ORW x0:(MOVBZloadidx [i] {s} p idx mem) s0:(SLWconst [8] x1:(MOVBZloadidx [i-1] {s} p idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) + // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i-1] {s} p 
idx mem) for { - or := v.Args[0] - if or.Op != OpS390XORW { + x0 := v.Args[0] + if x0.Op != OpS390XMOVBZloadidx { break } - s0 := or.Args[0] + i := x0.AuxInt + s := x0.Aux + p := x0.Args[0] + idx := x0.Args[1] + mem := x0.Args[2] + s0 := v.Args[1] if s0.Op != OpS390XSLWconst { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if s0.AuxInt != 8 { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - y := or.Args[1] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZloadidx { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + if x1.AuxInt != i-1 { break } - i1 := x1.AuxInt if x1.Aux != s { break } - if idx != x1.Args[0] { + if p != x1.Args[0] { break } - if p != x1.Args[1] { + if idx != x1.Args[1] { break } if mem != x1.Args[2] { break } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0)) { break } b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = i - 1 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) return true } - // match: (ORW or:(ORW y s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) + // match: (ORW o0:(ORW x0:(MOVHZloadidx [i] {s} p idx mem) s0:(SLWconst [16] x1:(MOVBZloadidx [i-1] {s} p idx mem))) s1:(SLWconst [24] x2:(MOVBZloadidx [i-2] {s} p idx mem))) + // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (MOVWZloadidx [i-2] {s} p idx mem) for { - or := v.Args[0] - if or.Op != OpS390XORW { + o0 := v.Args[0] + if o0.Op != OpS390XORW { break } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLWconst { - break - } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + x0 := o0.Args[0] + if x0.Op != OpS390XMOVHZloadidx { break } - i0 := x0.AuxInt + i := x0.AuxInt s := x0.Aux p := x0.Args[0] idx := x0.Args[1] mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { + s0 := o0.Args[1] + if s0.Op != OpS390XSLWconst { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + if s0.AuxInt != 16 { break } - i1 := x1.AuxInt - if x1.Aux != s { + x1 := s0.Args[0] + if x1.Op != OpS390XMOVBZloadidx { break } - 
if idx != x1.Args[0] { + if x1.AuxInt != i-1 { break } - if p != x1.Args[1] { + if x1.Aux != s { break } - if mem != x1.Args[2] { + if p != x1.Args[0] { break } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if idx != x1.Args[1] { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) - v.reset(OpCopy) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) - return true - } - // match: (ORW or:(ORW y s0:(SLWconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) s1:(SLWconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) - // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) - // result: @mergePoint(b,x0,x1) (ORW (SLWconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) - for { - or := v.Args[0] - if or.Op != OpS390XORW { + if mem != x1.Args[2] { break } - y := or.Args[0] - s0 := or.Args[1] - if s0.Op != OpS390XSLWconst { + s1 := v.Args[1] + if s1.Op != OpS390XSLWconst { break } - j0 := s0.AuxInt - x0 := s0.Args[0] - if x0.Op != OpS390XMOVBZloadidx { + if s1.AuxInt != 24 { break } - i0 := x0.AuxInt - s := x0.Aux - idx := x0.Args[0] - p := x0.Args[1] - mem := x0.Args[2] - s1 := v.Args[1] - if s1.Op != OpS390XSLWconst { + x2 := s1.Args[0] + if x2.Op != OpS390XMOVBZloadidx { break } - j1 := s1.AuxInt - x1 := s1.Args[0] - if x1.Op != OpS390XMOVBZloadidx { + if x2.AuxInt != i-2 { break } - i1 := x1.AuxInt - if x1.Aux != s { + if x2.Aux != s { break } - if idx != x1.Args[0] { + if p != x2.Args[0] { break } - if p != x1.Args[1] { + if idx != x2.Args[1] { break } - if mem != x1.Args[2] { + if mem != x2.Args[2] { break } - if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or)) { + if !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { break } - b = mergePoint(b, x0, x1) - v0 := b.NewValue0(v.Pos, OpS390XORW, v.Type) + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, v.Type) v.reset(OpCopy) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpS390XSLWconst, v.Type) - v1.AuxInt = j0 - v2 := b.NewValue0(v.Pos, OpS390XMOVHZreg, types.UInt64) - v3 := b.NewValue0(v.Pos, OpS390XMOVHBRloadidx, types.Int16) - v3.AuxInt = i0 - v3.Aux = s - v3.AddArg(p) - v3.AddArg(idx) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v0.AddArg(y) + v0.AuxInt = i - 2 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(idx) + v0.AddArg(mem) return true } return false @@ -33342,9 +17438,9 @@ func rewriteValueS390X_OpS390XXOR(v *Value) bool { v.AddArg(x) return true } - // match: (XOR (SLDconst x [c]) (SRDconst x [d])) - // cond: d == 64-c - // result: (RLLGconst [c] x) + // match: (XOR 
(SLDconst x [c]) (SRDconst x [64-c])) + // cond: + // result: (RLLGconst [ c] x) for { v_0 := v.Args[0] if v_0.Op != OpS390XSLDconst { @@ -33356,11 +17452,10 @@ func rewriteValueS390X_OpS390XXOR(v *Value) bool { if v_1.Op != OpS390XSRDconst { break } - d := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 64-c { break } - if !(d == 64-c) { + if x != v_1.Args[0] { break } v.reset(OpS390XRLLGconst) @@ -33368,29 +17463,28 @@ func rewriteValueS390X_OpS390XXOR(v *Value) bool { v.AddArg(x) return true } - // match: (XOR (SRDconst x [d]) (SLDconst x [c])) - // cond: d == 64-c - // result: (RLLGconst [c] x) + // match: (XOR (SRDconst x [c]) (SLDconst x [64-c])) + // cond: + // result: (RLLGconst [64-c] x) for { v_0 := v.Args[0] if v_0.Op != OpS390XSRDconst { break } - d := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] v_1 := v.Args[1] if v_1.Op != OpS390XSLDconst { break } - c := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 64-c { break } - if !(d == 64-c) { + if x != v_1.Args[0] { break } v.reset(OpS390XRLLGconst) - v.AuxInt = c + v.AuxInt = 64 - c v.AddArg(x) return true } @@ -33412,24 +17506,6 @@ func rewriteValueS390X_OpS390XXOR(v *Value) bool { v.AuxInt = c ^ d return true } - // match: (XOR (MOVDconst [d]) (MOVDconst [c])) - // cond: - // result: (MOVDconst [c^d]) - for { - v_0 := v.Args[0] - if v_0.Op != OpS390XMOVDconst { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpS390XMOVDconst { - break - } - c := v_1.AuxInt - v.reset(OpS390XMOVDconst) - v.AuxInt = c ^ d - return true - } // match: (XOR x x) // cond: // result: (MOVDconst [0]) @@ -33494,58 +17570,6 @@ func rewriteValueS390X_OpS390XXOR(v *Value) bool { v.AddArg(mem) return true } - // match: (XOR g:(MOVDload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (XORload [off] {sym} x ptr mem) - for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XXORload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (XOR x g:(MOVDload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (XORload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVDload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XXORload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } return false } func rewriteValueS390X_OpS390XXORW(v *Value) bool { @@ -33579,9 +17603,9 @@ func rewriteValueS390X_OpS390XXORW(v *Value) bool { v.AddArg(x) return true } - // match: (XORW (SLWconst x [c]) (SRWconst x [d])) - // cond: d == 32-c - // result: (RLLconst [c] x) + // match: (XORW (SLWconst x [c]) (SRWconst x [32-c])) + // cond: + // result: (RLLconst [ c] x) for { v_0 := v.Args[0] if v_0.Op != OpS390XSLWconst { @@ -33593,11 +17617,10 @@ func rewriteValueS390X_OpS390XXORW(v *Value) bool { if v_1.Op != OpS390XSRWconst { break } - d := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 32-c { break } - if !(d == 32-c) { + if x != v_1.Args[0] { break } v.reset(OpS390XRLLconst) @@ -33605,29 
+17628,28 @@ func rewriteValueS390X_OpS390XXORW(v *Value) bool { v.AddArg(x) return true } - // match: (XORW (SRWconst x [d]) (SLWconst x [c])) - // cond: d == 32-c - // result: (RLLconst [c] x) + // match: (XORW (SRWconst x [c]) (SLWconst x [32-c])) + // cond: + // result: (RLLconst [32-c] x) for { v_0 := v.Args[0] if v_0.Op != OpS390XSRWconst { break } - d := v_0.AuxInt + c := v_0.AuxInt x := v_0.Args[0] v_1 := v.Args[1] if v_1.Op != OpS390XSLWconst { break } - c := v_1.AuxInt - if x != v_1.Args[0] { + if v_1.AuxInt != 32-c { break } - if !(d == 32-c) { + if x != v_1.Args[0] { break } v.reset(OpS390XRLLconst) - v.AuxInt = c + v.AuxInt = 32 - c v.AddArg(x) return true } @@ -33695,58 +17717,6 @@ func rewriteValueS390X_OpS390XXORW(v *Value) bool { v.AddArg(mem) return true } - // match: (XORW g:(MOVWload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (XORWload [off] {sym} x ptr mem) - for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVWload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XXORWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (XORW x g:(MOVWload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (XORWload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XXORWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } // match: (XORW x g:(MOVWZload [off] {sym} ptr mem)) // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) // result: (XORWload [off] {sym} x ptr mem) @@ -33799,58 +17769,6 @@ func rewriteValueS390X_OpS390XXORW(v *Value) bool { v.AddArg(mem) return true } - // match: (XORW g:(MOVWZload [off] {sym} ptr mem) x) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (XORWload [off] {sym} x ptr mem) - for { - t := v.Type - g := v.Args[0] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - x := v.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XXORWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (XORW x g:(MOVWZload [off] {sym} ptr mem)) - // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) - // result: (XORWload [off] {sym} x ptr mem) - for { - t := v.Type - x := v.Args[0] - g := v.Args[1] - if g.Op != OpS390XMOVWZload { - break - } - off := g.AuxInt - sym := g.Aux - ptr := g.Args[0] - mem := g.Args[1] - if !(ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g)) { - break - } - v.reset(OpS390XXORWload) - v.Type = t - v.AuxInt = off - v.Aux = sym - v.AddArg(x) - v.AddArg(ptr) - v.AddArg(mem) - return true - } return false } func rewriteValueS390X_OpS390XXORWconst(v *Value) bool { @@ -33956,7 +17874,7 @@ func rewriteValueS390X_OpSelect0(v *Value) bool { return false } func 
rewriteValueS390X_OpSelect1(v *Value) bool { - // match: (Select1 (AddTupleFirst32 tuple _)) + // match: (Select1 (AddTupleFirst32 tuple _ )) // cond: // result: (Select1 tuple) for { @@ -33969,7 +17887,7 @@ func rewriteValueS390X_OpSelect1(v *Value) bool { v.AddArg(tuple) return true } - // match: (Select1 (AddTupleFirst64 tuple _)) + // match: (Select1 (AddTupleFirst64 tuple _ )) // cond: // result: (Select1 tuple) for { @@ -34018,7 +17936,7 @@ func rewriteValueS390X_OpSignExt32to64(v *Value) bool { } } func rewriteValueS390X_OpSignExt8to16(v *Value) bool { - // match: (SignExt8to16 x) + // match: (SignExt8to16 x) // cond: // result: (MOVBreg x) for { @@ -34029,7 +17947,7 @@ func rewriteValueS390X_OpSignExt8to16(v *Value) bool { } } func rewriteValueS390X_OpSignExt8to32(v *Value) bool { - // match: (SignExt8to32 x) + // match: (SignExt8to32 x) // cond: // result: (MOVBreg x) for { @@ -34040,7 +17958,7 @@ func rewriteValueS390X_OpSignExt8to32(v *Value) bool { } } func rewriteValueS390X_OpSignExt8to64(v *Value) bool { - // match: (SignExt8to64 x) + // match: (SignExt8to64 x) // cond: // result: (MOVBreg x) for { @@ -34199,7 +18117,7 @@ func rewriteValueS390X_OpStore(v *Value) bool { return false } func rewriteValueS390X_OpSub16(v *Value) bool { - // match: (Sub16 x y) + // match: (Sub16 x y) // cond: // result: (SUBW x y) for { @@ -34212,7 +18130,7 @@ func rewriteValueS390X_OpSub16(v *Value) bool { } } func rewriteValueS390X_OpSub32(v *Value) bool { - // match: (Sub32 x y) + // match: (Sub32 x y) // cond: // result: (SUBW x y) for { @@ -34238,7 +18156,7 @@ func rewriteValueS390X_OpSub32F(v *Value) bool { } } func rewriteValueS390X_OpSub64(v *Value) bool { - // match: (Sub64 x y) + // match: (Sub64 x y) // cond: // result: (SUB x y) for { @@ -34264,7 +18182,7 @@ func rewriteValueS390X_OpSub64F(v *Value) bool { } } func rewriteValueS390X_OpSub8(v *Value) bool { - // match: (Sub8 x y) + // match: (Sub8 x y) // cond: // result: (SUBW x y) for { @@ -34290,7 +18208,7 @@ func rewriteValueS390X_OpSubPtr(v *Value) bool { } } func rewriteValueS390X_OpTrunc16to8(v *Value) bool { - // match: (Trunc16to8 x) + // match: (Trunc16to8 x) // cond: // result: x for { @@ -34314,7 +18232,7 @@ func rewriteValueS390X_OpTrunc32to16(v *Value) bool { } } func rewriteValueS390X_OpTrunc32to8(v *Value) bool { - // match: (Trunc32to8 x) + // match: (Trunc32to8 x) // cond: // result: x for { @@ -34350,7 +18268,7 @@ func rewriteValueS390X_OpTrunc64to32(v *Value) bool { } } func rewriteValueS390X_OpTrunc64to8(v *Value) bool { - // match: (Trunc64to8 x) + // match: (Trunc64to8 x) // cond: // result: x for { @@ -34401,7 +18319,7 @@ func rewriteValueS390X_OpXor64(v *Value) bool { } } func rewriteValueS390X_OpXor8(v *Value) bool { - // match: (Xor8 x y) + // match: (Xor8 x y) // cond: // result: (XORW x y) for { @@ -34637,7 +18555,7 @@ func rewriteValueS390X_OpZeroExt32to64(v *Value) bool { } } func rewriteValueS390X_OpZeroExt8to16(v *Value) bool { - // match: (ZeroExt8to16 x) + // match: (ZeroExt8to16 x) // cond: // result: (MOVBZreg x) for { @@ -34648,7 +18566,7 @@ func rewriteValueS390X_OpZeroExt8to16(v *Value) bool { } } func rewriteValueS390X_OpZeroExt8to32(v *Value) bool { - // match: (ZeroExt8to32 x) + // match: (ZeroExt8to32 x) // cond: // result: (MOVBZreg x) for { @@ -34659,7 +18577,7 @@ func rewriteValueS390X_OpZeroExt8to32(v *Value) bool { } } func rewriteValueS390X_OpZeroExt8to64(v *Value) bool { - // match: (ZeroExt8to64 x) + // match: (ZeroExt8to64 x) // cond: // result: (MOVBZreg x) for { diff --git 
a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go index 2be17ef459..2782316c7e 100644 --- a/src/cmd/compile/internal/ssa/rewritedec.go +++ b/src/cmd/compile/internal/ssa/rewritedec.go @@ -34,7 +34,7 @@ func rewriteValuedec(v *Value) bool { return false } func rewriteValuedec_OpComplexImag(v *Value) bool { - // match: (ComplexImag (ComplexMake _ imag)) + // match: (ComplexImag (ComplexMake _ imag )) // cond: // result: imag for { @@ -51,7 +51,7 @@ func rewriteValuedec_OpComplexImag(v *Value) bool { return false } func rewriteValuedec_OpComplexReal(v *Value) bool { - // match: (ComplexReal (ComplexMake real _)) + // match: (ComplexReal (ComplexMake real _ )) // cond: // result: real for { @@ -274,7 +274,7 @@ func rewriteValuedec_OpSliceLen(v *Value) bool { return false } func rewriteValuedec_OpSlicePtr(v *Value) bool { - // match: (SlicePtr (SliceMake ptr _ _)) + // match: (SlicePtr (SliceMake ptr _ _ )) // cond: // result: ptr for { diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 7f440875d0..8ab751797b 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -422,7 +422,7 @@ func rewriteValuegeneric(v *Value) bool { func rewriteValuegeneric_OpAdd16(v *Value) bool { b := v.Block _ = b - // match: (Add16 (Const16 [c]) (Const16 [d])) + // match: (Add16 (Const16 [c]) (Const16 [d])) // cond: // result: (Const16 [int64(int16(c+d))]) for { @@ -440,22 +440,25 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { v.AuxInt = int64(int16(c + d)) return true } - // match: (Add16 (Const16 [d]) (Const16 [c])) - // cond: - // result: (Const16 [int64(int16(c+d))]) + // match: (Add16 x (Const16 [c])) + // cond: x.Op != OpConst16 + // result: (Add16 (Const16 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - d := v_0.AuxInt + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst16 { break } + t := v_1.Type c := v_1.AuxInt - v.reset(OpConst16) - v.AuxInt = int64(int16(c + d)) + if !(x.Op != OpConst16) { + break + } + v.reset(OpAdd16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = c + v.AddArg(v0) + v.AddArg(x) return true } // match: (Add16 (Const16 [0]) x) @@ -475,23 +478,6 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { v.AddArg(x) return true } - // match: (Add16 x (Const16 [0])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (Add16 (Const16 [1]) (Com16 x)) // cond: // result: (Neg16 x) @@ -512,23 +498,20 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { v.AddArg(x) return true } - // match: (Add16 (Com16 x) (Const16 [1])) - // cond: - // result: (Neg16 x) + // match: (Add16 x l:(Add16 _ _)) + // cond: (x.Op != OpAdd16 && x.Op != OpConst16) + // result: (Add16 l x) for { - v_0 := v.Args[0] - if v_0.Op != OpCom16 { - break - } - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst16 { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpAdd16 { break } - if v_1.AuxInt != 1 { + if !(x.Op != OpAdd16 && x.Op != OpConst16) { break } - v.reset(OpNeg16) + v.reset(OpAdd16) + v.AddArg(l) v.AddArg(x) return true } @@ -558,84 +541,6 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { v.AddArg(v0) return true } - // match: (Add16 (Add16 z i:(Const16 )) x) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Add16 i (Add16 z x)) - 
for { - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst16 { - break - } - t := i.Type - x := v.Args[1] - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpAdd16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Add16 x (Add16 i:(Const16 ) z)) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Add16 i (Add16 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAdd16 { - break - } - i := v_1.Args[0] - if i.Op != OpConst16 { - break - } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpAdd16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Add16 x (Add16 z i:(Const16 ))) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Add16 i (Add16 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAdd16 { - break - } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpAdd16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (Add16 (Sub16 i:(Const16 ) z) x) // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (Add16 i (Sub16 x z)) @@ -688,58 +593,6 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { v.AddArg(v0) return true } - // match: (Add16 x (Sub16 i:(Const16 ) z)) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Add16 i (Sub16 x z)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpSub16 { - break - } - i := v_1.Args[0] - if i.Op != OpConst16 { - break - } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpAdd16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpSub16, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add16 (Sub16 i:(Const16 ) z) x) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Add16 i (Sub16 x z)) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub16 { - break - } - i := v_0.Args[0] - if i.Op != OpConst16 { - break - } - t := i.Type - z := v_0.Args[1] - x := v.Args[1] - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpAdd16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpSub16, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true - } // match: (Add16 (Sub16 z i:(Const16 )) x) // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (Sub16 (Add16 x z) i) @@ -792,61 +645,39 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { v.AddArg(i) return true } - // match: (Add16 x (Sub16 z i:(Const16 ))) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Sub16 (Add16 x z) i) + // match: (Add16 (Const16 [c]) (Add16 (Const16 [d]) x)) + // cond: + // result: (Add16 (Const16 [int64(int16(c+d))]) x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpSub16 { - break - } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { + v_0 := v.Args[0] + if v_0.Op != OpConst16 { break } - v.reset(OpSub16) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) - return true - } - // match: (Add16 (Sub16 z i:(Const16 )) x) - // cond: (z.Op != OpConst16 && x.Op != 
OpConst16) - // result: (Sub16 (Add16 x z) i) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub16 { + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAdd16 { break } - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst16 { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst16 { break } - t := i.Type - x := v.Args[1] - if !(z.Op != OpConst16 && x.Op != OpConst16) { + if v_1_0.Type != t { break } - v.reset(OpSub16) - v0 := b.NewValue0(v.Pos, OpAdd16, t) - v0.AddArg(x) - v0.AddArg(z) + d := v_1_0.AuxInt + x := v_1.Args[1] + v.reset(OpAdd16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int64(int16(c + d)) v.AddArg(v0) - v.AddArg(i) + v.AddArg(x) return true } - // match: (Add16 (Const16 [c]) (Add16 (Const16 [d]) x)) + // match: (Add16 (Const16 [c]) (Sub16 (Const16 [d]) x)) // cond: - // result: (Add16 (Const16 [int64(int16(c+d))]) x) + // result: (Sub16 (Const16 [int64(int16(c+d))]) x) for { v_0 := v.Args[0] if v_0.Op != OpConst16 { @@ -855,7 +686,7 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { t := v_0.Type c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpAdd16 { + if v_1.Op != OpSub16 { break } v_1_0 := v_1.Args[0] @@ -867,16 +698,16 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { } d := v_1_0.AuxInt x := v_1.Args[1] - v.reset(OpAdd16) + v.reset(OpSub16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c + d)) v.AddArg(v0) v.AddArg(x) return true } - // match: (Add16 (Const16 [c]) (Add16 x (Const16 [d]))) + // match: (Add16 (Const16 [c]) (Sub16 x (Const16 [d]))) // cond: - // result: (Add16 (Const16 [int64(int16(c+d))]) x) + // result: (Add16 (Const16 [int64(int16(c-d))]) x) for { v_0 := v.Args[0] if v_0.Op != OpConst16 { @@ -885,7 +716,7 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { t := v_0.Type c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpAdd16 { + if v_1.Op != OpSub16 { break } x := v_1.Args[0] @@ -899,202 +730,22 @@ func rewriteValuegeneric_OpAdd16(v *Value) bool { d := v_1_1.AuxInt v.reset(OpAdd16) v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c + d)) + v0.AuxInt = int64(int16(c - d)) v.AddArg(v0) v.AddArg(x) return true } - // match: (Add16 (Add16 (Const16 [d]) x) (Const16 [c])) + return false +} +func rewriteValuegeneric_OpAdd32(v *Value) bool { + b := v.Block + _ = b + // match: (Add32 (Const32 [c]) (Const32 [d])) // cond: - // result: (Add16 (Const16 [int64(int16(c+d))]) x) + // result: (Const32 [int64(int32(c+d))]) for { v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add16 (Add16 x (Const16 [d])) (Const16 [c])) - // cond: - // result: (Add16 (Const16 [int64(int16(c+d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add16 (Const16 [c]) (Sub16 (Const16 [d]) x)) - // cond: - // result: (Sub16 (Const16 
[int64(int16(c+d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpSub16 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 { - break - } - if v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - x := v_1.Args[1] - v.reset(OpSub16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add16 (Sub16 (Const16 [d]) x) (Const16 [c])) - // cond: - // result: (Sub16 (Const16 [int64(int16(c+d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub16 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpSub16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add16 (Const16 [c]) (Sub16 x (Const16 [d]))) - // cond: - // result: (Add16 (Const16 [int64(int16(c-d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpSub16 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - break - } - if v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAdd16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add16 (Sub16 x (Const16 [d])) (Const16 [c])) - // cond: - // result: (Add16 (Const16 [int64(int16(c-d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub16 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpAdd32(v *Value) bool { - b := v.Block - _ = b - // match: (Add32 (Const32 [c]) (Const32 [d])) - // cond: - // result: (Const32 [int64(int32(c+d))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { + if v_0.Op != OpConst32 { break } c := v_0.AuxInt @@ -1107,22 +758,25 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { v.AuxInt = int64(int32(c + d)) return true } - // match: (Add32 (Const32 [d]) (Const32 [c])) - // cond: - // result: (Const32 [int64(int32(c+d))]) + // match: (Add32 x (Const32 [c])) + // cond: x.Op != OpConst32 + // result: (Add32 (Const32 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - d := v_0.AuxInt + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst32 { break } + t := v_1.Type c := v_1.AuxInt - v.reset(OpConst32) - v.AuxInt = int64(int32(c + d)) + if !(x.Op != OpConst32) { + break + } + v.reset(OpAdd32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = c + v.AddArg(v0) + v.AddArg(x) return true } // match: (Add32 (Const32 [0]) x) @@ -1142,23 +796,6 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { v.AddArg(x) return true } - // match: (Add32 x (Const32 [0])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (Add32 (Const32 
[1]) (Com32 x)) // cond: // result: (Neg32 x) @@ -1179,23 +816,20 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { v.AddArg(x) return true } - // match: (Add32 (Com32 x) (Const32 [1])) - // cond: - // result: (Neg32 x) + // match: (Add32 x l:(Add32 _ _)) + // cond: (x.Op != OpAdd32 && x.Op != OpConst32) + // result: (Add32 l x) for { - v_0 := v.Args[0] - if v_0.Op != OpCom32 { - break - } - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpAdd32 { break } - if v_1.AuxInt != 1 { + if !(x.Op != OpAdd32 && x.Op != OpConst32) { break } - v.reset(OpNeg32) + v.reset(OpAdd32) + v.AddArg(l) v.AddArg(x) return true } @@ -1225,39 +859,39 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { v.AddArg(v0) return true } - // match: (Add32 (Add32 z i:(Const32 )) x) + // match: (Add32 (Sub32 i:(Const32 ) z) x) // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Add32 i (Add32 z x)) + // result: (Add32 i (Sub32 x z)) for { v_0 := v.Args[0] - if v_0.Op != OpAdd32 { + if v_0.Op != OpSub32 { break } - z := v_0.Args[0] - i := v_0.Args[1] + i := v_0.Args[0] if i.Op != OpConst32 { break } t := i.Type + z := v_0.Args[1] x := v.Args[1] if !(z.Op != OpConst32 && x.Op != OpConst32) { break } v.reset(OpAdd32) v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd32, t) - v0.AddArg(z) + v0 := b.NewValue0(v.Pos, OpSub32, t) v0.AddArg(x) + v0.AddArg(z) v.AddArg(v0) return true } - // match: (Add32 x (Add32 i:(Const32 ) z)) + // match: (Add32 x (Sub32 i:(Const32 ) z)) // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Add32 i (Add32 z x)) + // result: (Add32 i (Sub32 x z)) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpAdd32 { + if v_1.Op != OpSub32 { break } i := v_1.Args[0] @@ -1271,157 +905,27 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { } v.reset(OpAdd32) v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd32, t) - v0.AddArg(z) + v0 := b.NewValue0(v.Pos, OpSub32, t) v0.AddArg(x) + v0.AddArg(z) v.AddArg(v0) return true } - // match: (Add32 x (Add32 z i:(Const32 ))) + // match: (Add32 (Sub32 z i:(Const32 )) x) // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Add32 i (Add32 z x)) + // result: (Sub32 (Add32 x z) i) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAdd32 { + v_0 := v.Args[0] + if v_0.Op != OpSub32 { break } - z := v_1.Args[0] - i := v_1.Args[1] + z := v_0.Args[0] + i := v_0.Args[1] if i.Op != OpConst32 { break } t := i.Type - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpAdd32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Add32 (Sub32 i:(Const32 ) z) x) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Add32 i (Sub32 x z)) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub32 { - break - } - i := v_0.Args[0] - if i.Op != OpConst32 { - break - } - t := i.Type - z := v_0.Args[1] - x := v.Args[1] - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpAdd32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpSub32, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add32 x (Sub32 i:(Const32 ) z)) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Add32 i (Sub32 x z)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpSub32 { - break - } - i := v_1.Args[0] - if i.Op != OpConst32 { - break - } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpAdd32) - v.AddArg(i) - 
v0 := b.NewValue0(v.Pos, OpSub32, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add32 x (Sub32 i:(Const32 ) z)) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Add32 i (Sub32 x z)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpSub32 { - break - } - i := v_1.Args[0] - if i.Op != OpConst32 { - break - } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpAdd32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpSub32, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add32 (Sub32 i:(Const32 ) z) x) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Add32 i (Sub32 x z)) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub32 { - break - } - i := v_0.Args[0] - if i.Op != OpConst32 { - break - } - t := i.Type - z := v_0.Args[1] - x := v.Args[1] - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpAdd32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpSub32, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add32 (Sub32 z i:(Const32 )) x) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Sub32 (Add32 x z) i) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub32 { - break - } - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst32 { - break - } - t := i.Type - x := v.Args[1] + x := v.Args[1] if !(z.Op != OpConst32 && x.Op != OpConst32) { break } @@ -1459,61 +963,39 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { v.AddArg(i) return true } - // match: (Add32 x (Sub32 z i:(Const32 ))) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Sub32 (Add32 x z) i) + // match: (Add32 (Const32 [c]) (Add32 (Const32 [d]) x)) + // cond: + // result: (Add32 (Const32 [int64(int32(c+d))]) x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpSub32 { - break - } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst32 { - break - } - t := i.Type - if !(z.Op != OpConst32 && x.Op != OpConst32) { + v_0 := v.Args[0] + if v_0.Op != OpConst32 { break } - v.reset(OpSub32) - v0 := b.NewValue0(v.Pos, OpAdd32, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) - return true - } - // match: (Add32 (Sub32 z i:(Const32 )) x) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Sub32 (Add32 x z) i) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub32 { + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAdd32 { break } - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst32 { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { break } - t := i.Type - x := v.Args[1] - if !(z.Op != OpConst32 && x.Op != OpConst32) { + if v_1_0.Type != t { break } - v.reset(OpSub32) - v0 := b.NewValue0(v.Pos, OpAdd32, t) - v0.AddArg(x) - v0.AddArg(z) + d := v_1_0.AuxInt + x := v_1.Args[1] + v.reset(OpAdd32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int64(int32(c + d)) v.AddArg(v0) - v.AddArg(i) + v.AddArg(x) return true } - // match: (Add32 (Const32 [c]) (Add32 (Const32 [d]) x)) + // match: (Add32 (Const32 [c]) (Sub32 (Const32 [d]) x)) // cond: - // result: (Add32 (Const32 [int64(int32(c+d))]) x) + // result: (Sub32 (Const32 [int64(int32(c+d))]) x) for { v_0 := v.Args[0] if v_0.Op != OpConst32 { @@ -1522,7 +1004,7 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { t := v_0.Type c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpAdd32 { + if v_1.Op != OpSub32 { break } v_1_0 := v_1.Args[0] @@ -1534,16 +1016,16 @@ func rewriteValuegeneric_OpAdd32(v *Value) 
bool { } d := v_1_0.AuxInt x := v_1.Args[1] - v.reset(OpAdd32) + v.reset(OpSub32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c + d)) v.AddArg(v0) v.AddArg(x) return true } - // match: (Add32 (Const32 [c]) (Add32 x (Const32 [d]))) + // match: (Add32 (Const32 [c]) (Sub32 x (Const32 [d]))) // cond: - // result: (Add32 (Const32 [int64(int32(c+d))]) x) + // result: (Add32 (Const32 [int64(int32(c-d))]) x) for { v_0 := v.Args[0] if v_0.Op != OpConst32 { @@ -1552,7 +1034,7 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { t := v_0.Type c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpAdd32 { + if v_1.Op != OpSub32 { break } x := v_1.Args[0] @@ -1566,256 +1048,58 @@ func rewriteValuegeneric_OpAdd32(v *Value) bool { d := v_1_1.AuxInt v.reset(OpAdd32) v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c + d)) + v0.AuxInt = int64(int32(c - d)) v.AddArg(v0) v.AddArg(x) return true } - // match: (Add32 (Add32 (Const32 [d]) x) (Const32 [c])) + return false +} +func rewriteValuegeneric_OpAdd32F(v *Value) bool { + // match: (Add32F (Const32F [c]) (Const32F [d])) // cond: - // result: (Add32 (Const32 [int64(int32(c+d))]) x) + // result: (Const32F [f2i(float64(i2f32(c) + i2f32(d)))]) for { v_0 := v.Args[0] - if v_0.Op != OpAdd32 { + if v_0.Op != OpConst32F { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32F { break } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] + d := v_1.AuxInt + v.reset(OpConst32F) + v.AuxInt = f2i(float64(i2f32(c) + i2f32(d))) + return true + } + // match: (Add32F x (Const32F [0])) + // cond: + // result: x + for { + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_1.Op != OpConst32F { break } - if v_1.Type != t { + if v_1.AuxInt != 0 { break } - c := v_1.AuxInt - v.reset(OpAdd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c + d)) - v.AddArg(v0) + v.reset(OpCopy) + v.Type = x.Type v.AddArg(x) return true } - // match: (Add32 (Add32 x (Const32 [d])) (Const32 [c])) + // match: (Add32F (Const32F [0]) x) // cond: - // result: (Add32 (Const32 [int64(int32(c+d))]) x) + // result: x for { v_0 := v.Args[0] - if v_0.Op != OpAdd32 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_0.Op != OpConst32F { break } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add32 (Const32 [c]) (Sub32 (Const32 [d]) x)) - // cond: - // result: (Sub32 (Const32 [int64(int32(c+d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpSub32 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - if v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - x := v_1.Args[1] - v.reset(OpSub32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add32 (Sub32 (Const32 [d]) x) (Const32 [c])) - // cond: - // result: (Sub32 (Const32 [int64(int32(c+d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub32 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - 
if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpSub32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add32 (Const32 [c]) (Sub32 x (Const32 [d]))) - // cond: - // result: (Add32 (Const32 [int64(int32(c-d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpSub32 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - if v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAdd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add32 (Sub32 x (Const32 [d])) (Const32 [c])) - // cond: - // result: (Add32 (Const32 [int64(int32(c-d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub32 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpAdd32F(v *Value) bool { - // match: (Add32F (Const32F [c]) (Const32F [d])) - // cond: - // result: (Const32F [f2i(float64(i2f32(c) + i2f32(d)))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32F { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32F { - break - } - d := v_1.AuxInt - v.reset(OpConst32F) - v.AuxInt = f2i(float64(i2f32(c) + i2f32(d))) - return true - } - // match: (Add32F (Const32F [d]) (Const32F [c])) - // cond: - // result: (Const32F [f2i(float64(i2f32(c) + i2f32(d)))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32F { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32F { - break - } - c := v_1.AuxInt - v.reset(OpConst32F) - v.AuxInt = f2i(float64(i2f32(c) + i2f32(d))) - return true - } - // match: (Add32F x (Const32F [0])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32F { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Add32F (Const32F [0]) x) - // cond: - // result: x - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32F { - break - } - if v_0.AuxInt != 0 { + if v_0.AuxInt != 0 { break } x := v.Args[1] @@ -1829,7 +1113,7 @@ func rewriteValuegeneric_OpAdd32F(v *Value) bool { func rewriteValuegeneric_OpAdd64(v *Value) bool { b := v.Block _ = b - // match: (Add64 (Const64 [c]) (Const64 [d])) + // match: (Add64 (Const64 [c]) (Const64 [d])) // cond: // result: (Const64 [c+d]) for { @@ -1847,22 +1131,25 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { v.AuxInt = c + d return true } - // match: (Add64 (Const64 [d]) (Const64 [c])) - // cond: - // result: (Const64 [c+d]) + // match: (Add64 x (Const64 [c])) + // cond: x.Op != OpConst64 + // result: (Add64 (Const64 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - d := v_0.AuxInt + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst64 { break } + t := v_1.Type c := v_1.AuxInt - v.reset(OpConst64) - v.AuxInt = c + d + if !(x.Op != OpConst64) { + break + } + v.reset(OpAdd64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c + v.AddArg(v0) + v.AddArg(x) return true } // match: (Add64 (Const64 [0]) 
x) @@ -1882,23 +1169,6 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { v.AddArg(x) return true } - // match: (Add64 x (Const64 [0])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (Add64 (Const64 [1]) (Com64 x)) // cond: // result: (Neg64 x) @@ -1919,23 +1189,20 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { v.AddArg(x) return true } - // match: (Add64 (Com64 x) (Const64 [1])) - // cond: - // result: (Neg64 x) + // match: (Add64 x l:(Add64 _ _)) + // cond: (x.Op != OpAdd64 && x.Op != OpConst64) + // result: (Add64 l x) for { - v_0 := v.Args[0] - if v_0.Op != OpCom64 { - break - } - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpAdd64 { break } - if v_1.AuxInt != 1 { + if !(x.Op != OpAdd64 && x.Op != OpConst64) { break } - v.reset(OpNeg64) + v.reset(OpAdd64) + v.AddArg(l) v.AddArg(x) return true } @@ -1965,39 +1232,39 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { v.AddArg(v0) return true } - // match: (Add64 (Add64 z i:(Const64 )) x) + // match: (Add64 (Sub64 i:(Const64 ) z) x) // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Add64 i (Add64 z x)) + // result: (Add64 i (Sub64 x z)) for { v_0 := v.Args[0] - if v_0.Op != OpAdd64 { + if v_0.Op != OpSub64 { break } - z := v_0.Args[0] - i := v_0.Args[1] + i := v_0.Args[0] if i.Op != OpConst64 { break } t := i.Type + z := v_0.Args[1] x := v.Args[1] if !(z.Op != OpConst64 && x.Op != OpConst64) { break } v.reset(OpAdd64) v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(z) + v0 := b.NewValue0(v.Pos, OpSub64, t) v0.AddArg(x) + v0.AddArg(z) v.AddArg(v0) return true } - // match: (Add64 x (Add64 i:(Const64 ) z)) + // match: (Add64 x (Sub64 i:(Const64 ) z)) // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Add64 i (Add64 z x)) + // result: (Add64 i (Sub64 x z)) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpAdd64 { + if v_1.Op != OpSub64 { break } i := v_1.Args[0] @@ -2011,139 +1278,9 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { } v.reset(OpAdd64) v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(z) + v0 := b.NewValue0(v.Pos, OpSub64, t) v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Add64 x (Add64 z i:(Const64 ))) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Add64 i (Add64 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAdd64 { - break - } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpAdd64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Add64 (Sub64 i:(Const64 ) z) x) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Add64 i (Sub64 x z)) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub64 { - break - } - i := v_0.Args[0] - if i.Op != OpConst64 { - break - } - t := i.Type - z := v_0.Args[1] - x := v.Args[1] - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpAdd64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpSub64, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add64 x (Sub64 i:(Const64 ) z)) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Add64 i (Sub64 x z)) - for { - x := v.Args[0] - v_1 := 
v.Args[1] - if v_1.Op != OpSub64 { - break - } - i := v_1.Args[0] - if i.Op != OpConst64 { - break - } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpAdd64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpSub64, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add64 x (Sub64 i:(Const64 ) z)) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Add64 i (Sub64 x z)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpSub64 { - break - } - i := v_1.Args[0] - if i.Op != OpConst64 { - break - } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpAdd64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpSub64, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add64 (Sub64 i:(Const64 ) z) x) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Add64 i (Sub64 x z)) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub64 { - break - } - i := v_0.Args[0] - if i.Op != OpConst64 { - break - } - t := i.Type - z := v_0.Args[1] - x := v.Args[1] - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpAdd64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpSub64, t) - v0.AddArg(x) - v0.AddArg(z) + v0.AddArg(z) v.AddArg(v0) return true } @@ -2199,61 +1336,39 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { v.AddArg(i) return true } - // match: (Add64 x (Sub64 z i:(Const64 ))) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Sub64 (Add64 x z) i) + // match: (Add64 (Const64 [c]) (Add64 (Const64 [d]) x)) + // cond: + // result: (Add64 (Const64 [c+d]) x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpSub64 { - break - } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { + v_0 := v.Args[0] + if v_0.Op != OpConst64 { break } - v.reset(OpSub64) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) - return true - } - // match: (Add64 (Sub64 z i:(Const64 )) x) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Sub64 (Add64 x z) i) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub64 { + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAdd64 { break } - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst64 { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst64 { break } - t := i.Type - x := v.Args[1] - if !(z.Op != OpConst64 && x.Op != OpConst64) { + if v_1_0.Type != t { break } - v.reset(OpSub64) - v0 := b.NewValue0(v.Pos, OpAdd64, t) - v0.AddArg(x) - v0.AddArg(z) + d := v_1_0.AuxInt + x := v_1.Args[1] + v.reset(OpAdd64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c + d v.AddArg(v0) - v.AddArg(i) + v.AddArg(x) return true } - // match: (Add64 (Const64 [c]) (Add64 (Const64 [d]) x)) + // match: (Add64 (Const64 [c]) (Sub64 (Const64 [d]) x)) // cond: - // result: (Add64 (Const64 [c+d]) x) + // result: (Sub64 (Const64 [c+d]) x) for { v_0 := v.Args[0] if v_0.Op != OpConst64 { @@ -2262,7 +1377,7 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { t := v_0.Type c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpAdd64 { + if v_1.Op != OpSub64 { break } v_1_0 := v_1.Args[0] @@ -2274,16 +1389,16 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { } d := v_1_0.AuxInt x := v_1.Args[1] - v.reset(OpAdd64) + v.reset(OpSub64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c + d v.AddArg(v0) v.AddArg(x) return true } - // match: 
(Add64 (Const64 [c]) (Add64 x (Const64 [d]))) + // match: (Add64 (Const64 [c]) (Sub64 x (Const64 [d]))) // cond: - // result: (Add64 (Const64 [c+d]) x) + // result: (Add64 (Const64 [c-d]) x) for { v_0 := v.Args[0] if v_0.Op != OpConst64 { @@ -2292,7 +1407,7 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { t := v_0.Type c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpAdd64 { + if v_1.Op != OpSub64 { break } x := v_1.Args[0] @@ -2306,256 +1421,58 @@ func rewriteValuegeneric_OpAdd64(v *Value) bool { d := v_1_1.AuxInt v.reset(OpAdd64) v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c + d + v0.AuxInt = c - d v.AddArg(v0) v.AddArg(x) return true } - // match: (Add64 (Add64 (Const64 [d]) x) (Const64 [c])) + return false +} +func rewriteValuegeneric_OpAdd64F(v *Value) bool { + // match: (Add64F (Const64F [c]) (Const64F [d])) // cond: - // result: (Add64 (Const64 [c+d]) x) + // result: (Const64F [f2i(i2f(c) + i2f(d))]) for { v_0 := v.Args[0] - if v_0.Op != OpAdd64 { + if v_0.Op != OpConst64F { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64F { break } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] + d := v_1.AuxInt + v.reset(OpConst64F) + v.AuxInt = f2i(i2f(c) + i2f(d)) + return true + } + // match: (Add64F x (Const64F [0])) + // cond: + // result: x + for { + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpConst64F { break } - if v_1.Type != t { + if v_1.AuxInt != 0 { break } - c := v_1.AuxInt - v.reset(OpAdd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c + d - v.AddArg(v0) + v.reset(OpCopy) + v.Type = x.Type v.AddArg(x) return true } - // match: (Add64 (Add64 x (Const64 [d])) (Const64 [c])) + // match: (Add64F (Const64F [0]) x) // cond: - // result: (Add64 (Const64 [c+d]) x) + // result: x for { v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_0.Op != OpConst64F { break } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c + d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add64 (Const64 [c]) (Sub64 (Const64 [d]) x)) - // cond: - // result: (Sub64 (Const64 [c+d]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpSub64 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 { - break - } - if v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - x := v_1.Args[1] - v.reset(OpSub64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c + d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add64 (Sub64 (Const64 [d]) x) (Const64 [c])) - // cond: - // result: (Sub64 (Const64 [c+d]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub64 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpSub64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c + d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add64 (Const64 [c]) (Sub64 x (Const64 [d]))) - // cond: - // result: (Add64 (Const64 [c-d]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c := v_0.AuxInt 
- v_1 := v.Args[1] - if v_1.Op != OpSub64 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - if v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAdd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add64 (Sub64 x (Const64 [d])) (Const64 [c])) - // cond: - // result: (Add64 (Const64 [c-d]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub64 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpAdd64F(v *Value) bool { - // match: (Add64F (Const64F [c]) (Const64F [d])) - // cond: - // result: (Const64F [f2i(i2f(c) + i2f(d))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64F { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64F { - break - } - d := v_1.AuxInt - v.reset(OpConst64F) - v.AuxInt = f2i(i2f(c) + i2f(d)) - return true - } - // match: (Add64F (Const64F [d]) (Const64F [c])) - // cond: - // result: (Const64F [f2i(i2f(c) + i2f(d))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64F { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64F { - break - } - c := v_1.AuxInt - v.reset(OpConst64F) - v.AuxInt = f2i(i2f(c) + i2f(d)) - return true - } - // match: (Add64F x (Const64F [0])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64F { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Add64F (Const64F [0]) x) - // cond: - // result: x - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64F { - break - } - if v_0.AuxInt != 0 { + if v_0.AuxInt != 0 { break } x := v.Args[1] @@ -2569,7 +1486,7 @@ func rewriteValuegeneric_OpAdd64F(v *Value) bool { func rewriteValuegeneric_OpAdd8(v *Value) bool { b := v.Block _ = b - // match: (Add8 (Const8 [c]) (Const8 [d])) + // match: (Add8 (Const8 [c]) (Const8 [d])) // cond: // result: (Const8 [int64(int8(c+d))]) for { @@ -2587,25 +1504,28 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { v.AuxInt = int64(int8(c + d)) return true } - // match: (Add8 (Const8 [d]) (Const8 [c])) - // cond: - // result: (Const8 [int64(int8(c+d))]) + // match: (Add8 x (Const8 [c])) + // cond: x.Op != OpConst8 + // result: (Add8 (Const8 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - d := v_0.AuxInt + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst8 { break } + t := v_1.Type c := v_1.AuxInt - v.reset(OpConst8) - v.AuxInt = int64(int8(c + d)) + if !(x.Op != OpConst8) { + break + } + v.reset(OpAdd8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = c + v.AddArg(v0) + v.AddArg(x) return true } - // match: (Add8 (Const8 [0]) x) + // match: (Add8 (Const8 [0]) x) // cond: // result: x for { @@ -2622,24 +1542,7 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { v.AddArg(x) return true } - // match: (Add8 x (Const8 [0])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Add8 (Const8 [1]) (Com8 x)) + // match: (Add8 (Const8 [1]) 
(Com8 x)) // cond: // result: (Neg8 x) for { @@ -2659,27 +1562,24 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { v.AddArg(x) return true } - // match: (Add8 (Com8 x) (Const8 [1])) - // cond: - // result: (Neg8 x) + // match: (Add8 x l:(Add8 _ _)) + // cond: (x.Op != OpAdd8 && x.Op != OpConst8) + // result: (Add8 l x) for { - v_0 := v.Args[0] - if v_0.Op != OpCom8 { - break - } - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpAdd8 { break } - if v_1.AuxInt != 1 { + if !(x.Op != OpAdd8 && x.Op != OpConst8) { break } - v.reset(OpNeg8) + v.reset(OpAdd8) + v.AddArg(l) v.AddArg(x) return true } - // match: (Add8 (Add8 i:(Const8 ) z) x) + // match: (Add8 (Add8 i:(Const8 ) z) x) // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Add8 i (Add8 z x)) for { @@ -2705,39 +1605,39 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { v.AddArg(v0) return true } - // match: (Add8 (Add8 z i:(Const8 )) x) + // match: (Add8 (Sub8 i:(Const8 ) z) x) // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Add8 i (Add8 z x)) + // result: (Add8 i (Sub8 x z)) for { v_0 := v.Args[0] - if v_0.Op != OpAdd8 { + if v_0.Op != OpSub8 { break } - z := v_0.Args[0] - i := v_0.Args[1] + i := v_0.Args[0] if i.Op != OpConst8 { break } t := i.Type + z := v_0.Args[1] x := v.Args[1] if !(z.Op != OpConst8 && x.Op != OpConst8) { break } v.reset(OpAdd8) v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(z) + v0 := b.NewValue0(v.Pos, OpSub8, t) v0.AddArg(x) + v0.AddArg(z) v.AddArg(v0) return true } - // match: (Add8 x (Add8 i:(Const8 ) z)) + // match: (Add8 x (Sub8 i:(Const8 ) z)) // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Add8 i (Add8 z x)) + // result: (Add8 i (Sub8 x z)) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpAdd8 { + if v_1.Op != OpSub8 { break } i := v_1.Args[0] @@ -2751,145 +1651,15 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { } v.reset(OpAdd8) v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(z) + v0 := b.NewValue0(v.Pos, OpSub8, t) v0.AddArg(x) + v0.AddArg(z) v.AddArg(v0) return true } - // match: (Add8 x (Add8 z i:(Const8 ))) + // match: (Add8 (Sub8 z i:(Const8 )) x) // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Add8 i (Add8 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAdd8 { - break - } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpAdd8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Add8 (Sub8 i:(Const8 ) z) x) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Add8 i (Sub8 x z)) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub8 { - break - } - i := v_0.Args[0] - if i.Op != OpConst8 { - break - } - t := i.Type - z := v_0.Args[1] - x := v.Args[1] - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpAdd8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpSub8, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add8 x (Sub8 i:(Const8 ) z)) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Add8 i (Sub8 x z)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpSub8 { - break - } - i := v_1.Args[0] - if i.Op != OpConst8 { - break - } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpAdd8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, 
OpSub8, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add8 x (Sub8 i:(Const8 ) z)) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Add8 i (Sub8 x z)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpSub8 { - break - } - i := v_1.Args[0] - if i.Op != OpConst8 { - break - } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpAdd8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpSub8, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add8 (Sub8 i:(Const8 ) z) x) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Add8 i (Sub8 x z)) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub8 { - break - } - i := v_0.Args[0] - if i.Op != OpConst8 { - break - } - t := i.Type - z := v_0.Args[1] - x := v.Args[1] - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpAdd8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpSub8, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - return true - } - // match: (Add8 (Sub8 z i:(Const8 )) x) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Sub8 (Add8 x z) i) + // result: (Sub8 (Add8 x z) i) for { v_0 := v.Args[0] if v_0.Op != OpSub8 { @@ -2913,7 +1683,7 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { v.AddArg(i) return true } - // match: (Add8 x (Sub8 z i:(Const8 ))) + // match: (Add8 x (Sub8 z i:(Const8 ))) // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Sub8 (Add8 x z) i) for { @@ -2939,61 +1709,39 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { v.AddArg(i) return true } - // match: (Add8 x (Sub8 z i:(Const8 ))) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Sub8 (Add8 x z) i) + // match: (Add8 (Const8 [c]) (Add8 (Const8 [d]) x)) + // cond: + // result: (Add8 (Const8 [int64(int8(c+d))]) x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpSub8 { - break - } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { + v_0 := v.Args[0] + if v_0.Op != OpConst8 { break } - v.reset(OpSub8) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(x) - v0.AddArg(z) - v.AddArg(v0) - v.AddArg(i) - return true - } - // match: (Add8 (Sub8 z i:(Const8 )) x) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Sub8 (Add8 x z) i) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub8 { + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAdd8 { break } - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst8 { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst8 { break } - t := i.Type - x := v.Args[1] - if !(z.Op != OpConst8 && x.Op != OpConst8) { + if v_1_0.Type != t { break } - v.reset(OpSub8) - v0 := b.NewValue0(v.Pos, OpAdd8, t) - v0.AddArg(x) - v0.AddArg(z) + d := v_1_0.AuxInt + x := v_1.Args[1] + v.reset(OpAdd8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int64(int8(c + d)) v.AddArg(v0) - v.AddArg(i) + v.AddArg(x) return true } - // match: (Add8 (Const8 [c]) (Add8 (Const8 [d]) x)) + // match: (Add8 (Const8 [c]) (Sub8 (Const8 [d]) x)) // cond: - // result: (Add8 (Const8 [int64(int8(c+d))]) x) + // result: (Sub8 (Const8 [int64(int8(c+d))]) x) for { v_0 := v.Args[0] if v_0.Op != OpConst8 { @@ -3002,7 +1750,7 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { t := v_0.Type c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpAdd8 { + if v_1.Op != OpSub8 { break } v_1_0 := v_1.Args[0] @@ -3014,16 +1762,16 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { } d := 
v_1_0.AuxInt x := v_1.Args[1] - v.reset(OpAdd8) + v.reset(OpSub8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c + d)) v.AddArg(v0) v.AddArg(x) return true } - // match: (Add8 (Const8 [c]) (Add8 x (Const8 [d]))) + // match: (Add8 (Const8 [c]) (Sub8 x (Const8 [d]))) // cond: - // result: (Add8 (Const8 [int64(int8(c+d))]) x) + // result: (Add8 (Const8 [int64(int8(c-d))]) x) for { v_0 := v.Args[0] if v_0.Op != OpConst8 { @@ -3032,7 +1780,7 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { t := v_0.Type c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpAdd8 { + if v_1.Op != OpSub8 { break } x := v_1.Args[0] @@ -3046,236 +1794,56 @@ func rewriteValuegeneric_OpAdd8(v *Value) bool { d := v_1_1.AuxInt v.reset(OpAdd8) v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c + d)) + v0.AuxInt = int64(int8(c - d)) v.AddArg(v0) v.AddArg(x) return true } - // match: (Add8 (Add8 (Const8 [d]) x) (Const8 [c])) + return false +} +func rewriteValuegeneric_OpAddPtr(v *Value) bool { + // match: (AddPtr x (Const64 [c])) // cond: - // result: (Add8 (Const8 [int64(int8(c+d))]) x) + // result: (OffPtr x [c]) for { - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] + t := v.Type + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.Type != t { + if v_1.Op != OpConst64 { break } c := v_1.AuxInt - v.reset(OpAdd8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c + d)) - v.AddArg(v0) + v.reset(OpOffPtr) + v.Type = t + v.AuxInt = c v.AddArg(x) return true } - // match: (Add8 (Add8 x (Const8 [d])) (Const8 [c])) + // match: (AddPtr x (Const32 [c])) // cond: - // result: (Add8 (Const8 [int64(int8(c+d))]) x) + // result: (OffPtr x [c]) for { - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt + t := v.Type + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.Type != t { + if v_1.Op != OpConst32 { break } c := v_1.AuxInt - v.reset(OpAdd8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c + d)) - v.AddArg(v0) + v.reset(OpOffPtr) + v.Type = t + v.AuxInt = c v.AddArg(x) return true } - // match: (Add8 (Const8 [c]) (Sub8 (Const8 [d]) x)) + return false +} +func rewriteValuegeneric_OpAnd16(v *Value) bool { + b := v.Block + _ = b + // match: (And16 (Const16 [c]) (Const16 [d])) // cond: - // result: (Sub8 (Const8 [int64(int8(c+d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpSub8 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 { - break - } - if v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - x := v_1.Args[1] - v.reset(OpSub8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c + d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add8 (Sub8 (Const8 [d]) x) (Const8 [c])) - // cond: - // result: (Sub8 (Const8 [int64(int8(c+d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub8 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpSub8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c + d)) - v.AddArg(v0) - v.AddArg(x) - 
return true - } - // match: (Add8 (Const8 [c]) (Sub8 x (Const8 [d]))) - // cond: - // result: (Add8 (Const8 [int64(int8(c-d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpSub8 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 { - break - } - if v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAdd8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Add8 (Sub8 x (Const8 [d])) (Const8 [c])) - // cond: - // result: (Add8 (Const8 [int64(int8(c-d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub8 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpAddPtr(v *Value) bool { - // match: (AddPtr x (Const64 [c])) - // cond: - // result: (OffPtr x [c]) - for { - t := v.Type - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - v.reset(OpOffPtr) - v.Type = t - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (AddPtr x (Const32 [c])) - // cond: - // result: (OffPtr x [c]) - for { - t := v.Type - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - c := v_1.AuxInt - v.reset(OpOffPtr) - v.Type = t - v.AuxInt = c - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpAnd16(v *Value) bool { - b := v.Block - _ = b - // match: (And16 (Const16 [c]) (Const16 [d])) - // cond: - // result: (Const16 [int64(int16(c&d))]) + // result: (Const16 [int64(int16(c&d))]) for { v_0 := v.Args[0] if v_0.Op != OpConst16 { @@ -3291,22 +1859,25 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { v.AuxInt = int64(int16(c & d)) return true } - // match: (And16 (Const16 [d]) (Const16 [c])) - // cond: - // result: (Const16 [int64(int16(c&d))]) + // match: (And16 x (Const16 [c])) + // cond: x.Op != OpConst16 + // result: (And16 (Const16 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - d := v_0.AuxInt + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst16 { break } + t := v_1.Type c := v_1.AuxInt - v.reset(OpConst16) - v.AuxInt = int64(int16(c & d)) + if !(x.Op != OpConst16) { + break + } + v.reset(OpAnd16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = c + v.AddArg(v0) + v.AddArg(x) return true } // match: (And16 x x) @@ -3339,23 +1910,6 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { v.AddArg(x) return true } - // match: (And16 x (Const16 [-1])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.AuxInt != -1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (And16 (Const16 [0]) _) // cond: // result: (Const16 [0]) @@ -3371,21 +1925,6 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { v.AuxInt = 0 return true } - // match: (And16 _ (Const16 [0])) - // cond: - // result: (Const16 [0]) - for { - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpConst16) - v.AuxInt = 0 - return true - } // match: (And16 x (And16 x y)) // cond: // result: (And16 x y) @@ 
-3440,7 +1979,7 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { v.AddArg(y) return true } - // match: (And16 (And16 y x) x) + // match: (And16 (And16 x y) y) // cond: // result: (And16 x y) for { @@ -3448,9 +1987,9 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { if v_0.Op != OpAnd16 { break } - y := v_0.Args[0] - x := v_0.Args[1] - if x != v.Args[1] { + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { break } v.reset(OpAnd16) @@ -3458,33 +1997,24 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { v.AddArg(y) return true } - // match: (And16 (And16 i:(Const16 ) z) x) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (And16 i (And16 z x)) + // match: (And16 x l:(And16 _ _)) + // cond: (x.Op != OpAnd16 && x.Op != OpConst16) + // result: (And16 l x) for { - v_0 := v.Args[0] - if v_0.Op != OpAnd16 { - break - } - i := v_0.Args[0] - if i.Op != OpConst16 { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpAnd16 { break } - t := i.Type - z := v_0.Args[1] - x := v.Args[1] - if !(z.Op != OpConst16 && x.Op != OpConst16) { + if !(x.Op != OpAnd16 && x.Op != OpConst16) { break } v.reset(OpAnd16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v.AddArg(l) + v.AddArg(x) return true } - // match: (And16 (And16 z i:(Const16 )) x) + // match: (And16 (And16 i:(Const16 ) z) x) // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (And16 i (And16 z x)) for { @@ -3492,12 +2022,12 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { if v_0.Op != OpAnd16 { break } - z := v_0.Args[0] - i := v_0.Args[1] + i := v_0.Args[0] if i.Op != OpConst16 { break } t := i.Type + z := v_0.Args[1] x := v.Args[1] if !(z.Op != OpConst16 && x.Op != OpConst16) { break @@ -3510,171 +2040,29 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { v.AddArg(v0) return true } - // match: (And16 x (And16 i:(Const16 ) z)) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (And16 i (And16 z x)) + // match: (And16 (Const16 [c]) (And16 (Const16 [d]) x)) + // cond: + // result: (And16 (Const16 [int64(int16(c&d))]) x) for { - x := v.Args[0] + v_0 := v.Args[0] + if v_0.Op != OpConst16 { + break + } + t := v_0.Type + c := v_0.AuxInt v_1 := v.Args[1] if v_1.Op != OpAnd16 { break } - i := v_1.Args[0] - if i.Op != OpConst16 { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst16 { break } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst16 && x.Op != OpConst16) { + if v_1_0.Type != t { break } - v.reset(OpAnd16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And16 x (And16 z i:(Const16 ))) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (And16 i (And16 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd16 { - break - } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpAnd16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And16 (Const16 [c]) (And16 (Const16 [d]) x)) - // cond: - // result: (And16 (Const16 [int64(int16(c&d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd16 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 { - break - } - if v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - x := v_1.Args[1] - v.reset(OpAnd16) - v0 
:= b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And16 (Const16 [c]) (And16 x (Const16 [d]))) - // cond: - // result: (And16 (Const16 [int64(int16(c&d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd16 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - break - } - if v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAnd16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And16 (And16 (Const16 [d]) x) (Const16 [c])) - // cond: - // result: (And16 (Const16 [int64(int16(c&d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd16 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAnd16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And16 (And16 x (Const16 [d])) (Const16 [c])) - // cond: - // result: (And16 (Const16 [int64(int16(c&d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd16 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt + d := v_1_0.AuxInt + x := v_1.Args[1] v.reset(OpAnd16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c & d)) @@ -3687,7 +2075,7 @@ func rewriteValuegeneric_OpAnd16(v *Value) bool { func rewriteValuegeneric_OpAnd32(v *Value) bool { b := v.Block _ = b - // match: (And32 (Const32 [c]) (Const32 [d])) + // match: (And32 (Const32 [c]) (Const32 [d])) // cond: // result: (Const32 [int64(int32(c&d))]) for { @@ -3705,22 +2093,25 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool { v.AuxInt = int64(int32(c & d)) return true } - // match: (And32 (Const32 [d]) (Const32 [c])) - // cond: - // result: (Const32 [int64(int32(c&d))]) + // match: (And32 x (Const32 [c])) + // cond: x.Op != OpConst32 + // result: (And32 (Const32 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - d := v_0.AuxInt + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst32 { break } + t := v_1.Type c := v_1.AuxInt - v.reset(OpConst32) - v.AuxInt = int64(int32(c & d)) + if !(x.Op != OpConst32) { + break + } + v.reset(OpAnd32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = c + v.AddArg(v0) + v.AddArg(x) return true } // match: (And32 x x) @@ -3753,23 +2144,6 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool { v.AddArg(x) return true } - // match: (And32 x (Const32 [-1])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - if v_1.AuxInt != -1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (And32 (Const32 [0]) _) // cond: // result: (Const32 [0]) @@ -3785,21 +2159,6 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool { v.AuxInt = 0 return true } - // match: (And32 _ (Const32 [0])) - // cond: - // result: (Const32 [0]) - for { - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpConst32) - v.AuxInt = 0 - return true - } // 
match: (And32 x (And32 x y)) // cond: // result: (And32 x y) @@ -3854,7 +2213,7 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool { v.AddArg(y) return true } - // match: (And32 (And32 y x) x) + // match: (And32 (And32 x y) y) // cond: // result: (And32 x y) for { @@ -3862,9 +2221,9 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool { if v_0.Op != OpAnd32 { break } - y := v_0.Args[0] - x := v_0.Args[1] - if x != v.Args[1] { + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { break } v.reset(OpAnd32) @@ -3872,33 +2231,24 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool { v.AddArg(y) return true } - // match: (And32 (And32 i:(Const32 ) z) x) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (And32 i (And32 z x)) + // match: (And32 x l:(And32 _ _)) + // cond: (x.Op != OpAnd32 && x.Op != OpConst32) + // result: (And32 l x) for { - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - i := v_0.Args[0] - if i.Op != OpConst32 { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpAnd32 { break } - t := i.Type - z := v_0.Args[1] - x := v.Args[1] - if !(z.Op != OpConst32 && x.Op != OpConst32) { + if !(x.Op != OpAnd32 && x.Op != OpConst32) { break } v.reset(OpAnd32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v.AddArg(l) + v.AddArg(x) return true } - // match: (And32 (And32 z i:(Const32 )) x) + // match: (And32 (And32 i:(Const32 ) z) x) // cond: (z.Op != OpConst32 && x.Op != OpConst32) // result: (And32 i (And32 z x)) for { @@ -3906,12 +2256,12 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool { if v_0.Op != OpAnd32 { break } - z := v_0.Args[0] - i := v_0.Args[1] + i := v_0.Args[0] if i.Op != OpConst32 { break } t := i.Type + z := v_0.Args[1] x := v.Args[1] if !(z.Op != OpConst32 && x.Op != OpConst32) { break @@ -3924,186 +2274,44 @@ func rewriteValuegeneric_OpAnd32(v *Value) bool { v.AddArg(v0) return true } - // match: (And32 x (And32 i:(Const32 ) z)) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (And32 i (And32 z x)) + // match: (And32 (Const32 [c]) (And32 (Const32 [d]) x)) + // cond: + // result: (And32 (Const32 [int64(int32(c&d))]) x) for { - x := v.Args[0] + v_0 := v.Args[0] + if v_0.Op != OpConst32 { + break + } + t := v_0.Type + c := v_0.AuxInt v_1 := v.Args[1] if v_1.Op != OpAnd32 { break } - i := v_1.Args[0] - if i.Op != OpConst32 { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { break } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst32 && x.Op != OpConst32) { + if v_1_0.Type != t { break } + d := v_1_0.AuxInt + x := v_1.Args[1] v.reset(OpAnd32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(z) - v0.AddArg(x) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int64(int32(c & d)) v.AddArg(v0) + v.AddArg(x) return true } - // match: (And32 x (And32 z i:(Const32 ))) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (And32 i (And32 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd32 { - break - } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst32 { - break - } - t := i.Type - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpAnd32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And32 (Const32 [c]) (And32 (Const32 [d]) x)) - // cond: - // result: (And32 (Const32 [int64(int32(c&d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != 
OpAnd32 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - if v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - x := v_1.Args[1] - v.reset(OpAnd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And32 (Const32 [c]) (And32 x (Const32 [d]))) - // cond: - // result: (And32 (Const32 [int64(int32(c&d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd32 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - if v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAnd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And32 (And32 (Const32 [d]) x) (Const32 [c])) - // cond: - // result: (And32 (Const32 [int64(int32(c&d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAnd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And32 (And32 x (Const32 [d])) (Const32 [c])) - // cond: - // result: (And32 (Const32 [int64(int32(c&d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAnd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpAnd64(v *Value) bool { - b := v.Block - _ = b - // match: (And64 (Const64 [c]) (Const64 [d])) - // cond: - // result: (Const64 [c&d]) + return false +} +func rewriteValuegeneric_OpAnd64(v *Value) bool { + b := v.Block + _ = b + // match: (And64 (Const64 [c]) (Const64 [d])) + // cond: + // result: (Const64 [c&d]) for { v_0 := v.Args[0] if v_0.Op != OpConst64 { @@ -4119,22 +2327,25 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool { v.AuxInt = c & d return true } - // match: (And64 (Const64 [d]) (Const64 [c])) - // cond: - // result: (Const64 [c&d]) + // match: (And64 x (Const64 [c])) + // cond: x.Op != OpConst64 + // result: (And64 (Const64 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - d := v_0.AuxInt + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst64 { break } + t := v_1.Type c := v_1.AuxInt - v.reset(OpConst64) - v.AuxInt = c & d + if !(x.Op != OpConst64) { + break + } + v.reset(OpAnd64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c + v.AddArg(v0) + v.AddArg(x) return true } // match: (And64 x x) @@ -4167,23 +2378,6 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool { v.AddArg(x) return true } - // match: (And64 x (Const64 [-1])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.AuxInt != -1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (And64 (Const64 [0]) _) // cond: // result: (Const64 [0]) @@ -4199,21 +2393,6 @@ func rewriteValuegeneric_OpAnd64(v 
*Value) bool { v.AuxInt = 0 return true } - // match: (And64 _ (Const64 [0])) - // cond: - // result: (Const64 [0]) - for { - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpConst64) - v.AuxInt = 0 - return true - } // match: (And64 x (And64 x y)) // cond: // result: (And64 x y) @@ -4268,7 +2447,7 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool { v.AddArg(y) return true } - // match: (And64 (And64 y x) x) + // match: (And64 (And64 x y) y) // cond: // result: (And64 x y) for { @@ -4276,9 +2455,9 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool { if v_0.Op != OpAnd64 { break } - y := v_0.Args[0] - x := v_0.Args[1] - if x != v.Args[1] { + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { break } v.reset(OpAnd64) @@ -4312,32 +2491,6 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool { v.AddArg(v2) return true } - // match: (And64 x (Const64 [y])) - // cond: nlz(y) + nto(y) == 64 && nto(y) >= 32 - // result: (Rsh64Ux64 (Lsh64x64 x (Const64 [nlz(y)])) (Const64 [nlz(y)])) - for { - t := v.Type - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - y := v_1.AuxInt - if !(nlz(y)+nto(y) == 64 && nto(y) >= 32) { - break - } - v.reset(OpRsh64Ux64) - v0 := b.NewValue0(v.Pos, OpLsh64x64, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = nlz(y) - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = nlz(y) - v.AddArg(v2) - return true - } // match: (And64 (Const64 [y]) x) // cond: nlo(y) + ntz(y) == 64 && ntz(y) >= 32 // result: (Lsh64x64 (Rsh64Ux64 x (Const64 [ntz(y)])) (Const64 [ntz(y)])) @@ -4364,30 +2517,21 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool { v.AddArg(v2) return true } - // match: (And64 x (Const64 [y])) - // cond: nlo(y) + ntz(y) == 64 && ntz(y) >= 32 - // result: (Lsh64x64 (Rsh64Ux64 x (Const64 [ntz(y)])) (Const64 [ntz(y)])) + // match: (And64 x l:(And64 _ _)) + // cond: (x.Op != OpAnd64 && x.Op != OpConst64) + // result: (And64 l x) for { - t := v.Type x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + l := v.Args[1] + if l.Op != OpAnd64 { break } - y := v_1.AuxInt - if !(nlo(y)+ntz(y) == 64 && ntz(y) >= 32) { + if !(x.Op != OpAnd64 && x.Op != OpConst64) { break } - v.reset(OpLsh64x64) - v0 := b.NewValue0(v.Pos, OpRsh64Ux64, t) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpConst64, t) - v1.AuxInt = ntz(y) - v0.AddArg(v1) - v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = ntz(y) - v.AddArg(v2) + v.reset(OpAnd64) + v.AddArg(l) + v.AddArg(x) return true } // match: (And64 (And64 i:(Const64 ) z) x) @@ -4416,100 +2560,22 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool { v.AddArg(v0) return true } - // match: (And64 (And64 z i:(Const64 )) x) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (And64 i (And64 z x)) + // match: (And64 (Const64 [c]) (And64 (Const64 [d]) x)) + // cond: + // result: (And64 (Const64 [c&d]) x) for { v_0 := v.Args[0] - if v_0.Op != OpAnd64 { + if v_0.Op != OpConst64 { break } - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst64 { + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAnd64 { break } - t := i.Type - x := v.Args[1] - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpAnd64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And64 x (And64 i:(Const64 ) z)) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (And64 i (And64 z x)) - 
for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd64 { - break - } - i := v_1.Args[0] - if i.Op != OpConst64 { - break - } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpAnd64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And64 x (And64 z i:(Const64 ))) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (And64 i (And64 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd64 { - break - } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpAnd64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And64 (Const64 [c]) (And64 (Const64 [d]) x)) - // cond: - // result: (And64 (Const64 [c&d]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd64 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst64 { break } if v_1_0.Type != t { @@ -4524,102 +2590,12 @@ func rewriteValuegeneric_OpAnd64(v *Value) bool { v.AddArg(x) return true } - // match: (And64 (Const64 [c]) (And64 x (Const64 [d]))) - // cond: - // result: (And64 (Const64 [c&d]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd64 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - if v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAnd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c & d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And64 (And64 (Const64 [d]) x) (Const64 [c])) - // cond: - // result: (And64 (Const64 [c&d]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAnd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c & d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And64 (And64 x (Const64 [d])) (Const64 [c])) - // cond: - // result: (And64 (Const64 [c&d]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAnd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c & d - v.AddArg(v0) - v.AddArg(x) - return true - } return false } func rewriteValuegeneric_OpAnd8(v *Value) bool { b := v.Block _ = b - // match: (And8 (Const8 [c]) (Const8 [d])) + // match: (And8 (Const8 [c]) (Const8 [d])) // cond: // result: (Const8 [int64(int8(c&d))]) for { @@ -4637,25 +2613,28 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { v.AuxInt = int64(int8(c & d)) return true } - // match: (And8 (Const8 [d]) (Const8 [c])) - // cond: - // result: (Const8 [int64(int8(c&d))]) + // match: (And8 x (Const8 [c])) + // cond: x.Op != OpConst8 + // result: (And8 (Const8 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - d := 
v_0.AuxInt + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst8 { break } + t := v_1.Type c := v_1.AuxInt - v.reset(OpConst8) - v.AuxInt = int64(int8(c & d)) + if !(x.Op != OpConst8) { + break + } + v.reset(OpAnd8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = c + v.AddArg(v0) + v.AddArg(x) return true } - // match: (And8 x x) + // match: (And8 x x) // cond: // result: x for { @@ -4668,7 +2647,7 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { v.AddArg(x) return true } - // match: (And8 (Const8 [-1]) x) + // match: (And8 (Const8 [-1]) x) // cond: // result: x for { @@ -4685,24 +2664,7 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { v.AddArg(x) return true } - // match: (And8 x (Const8 [-1])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.AuxInt != -1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (And8 (Const8 [0]) _) + // match: (And8 (Const8 [0]) _) // cond: // result: (Const8 [0]) for { @@ -4717,22 +2679,7 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { v.AuxInt = 0 return true } - // match: (And8 _ (Const8 [0])) - // cond: - // result: (Const8 [0]) - for { - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpConst8) - v.AuxInt = 0 - return true - } - // match: (And8 x (And8 x y)) + // match: (And8 x (And8 x y)) // cond: // result: (And8 x y) for { @@ -4750,7 +2697,7 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { v.AddArg(y) return true } - // match: (And8 x (And8 y x)) + // match: (And8 x (And8 y x)) // cond: // result: (And8 x y) for { @@ -4768,7 +2715,7 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { v.AddArg(y) return true } - // match: (And8 (And8 x y) x) + // match: (And8 (And8 x y) x) // cond: // result: (And8 x y) for { @@ -4786,7 +2733,7 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { v.AddArg(y) return true } - // match: (And8 (And8 y x) x) + // match: (And8 (And8 x y) y) // cond: // result: (And8 x y) for { @@ -4794,9 +2741,9 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { if v_0.Op != OpAnd8 { break } - y := v_0.Args[0] - x := v_0.Args[1] - if x != v.Args[1] { + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { break } v.reset(OpAnd8) @@ -4804,33 +2751,24 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { v.AddArg(y) return true } - // match: (And8 (And8 i:(Const8 ) z) x) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (And8 i (And8 z x)) + // match: (And8 x l:(And8 _ _)) + // cond: (x.Op != OpAnd8 && x.Op != OpConst8) + // result: (And8 l x) for { - v_0 := v.Args[0] - if v_0.Op != OpAnd8 { - break - } - i := v_0.Args[0] - if i.Op != OpConst8 { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpAnd8 { break } - t := i.Type - z := v_0.Args[1] - x := v.Args[1] - if !(z.Op != OpConst8 && x.Op != OpConst8) { + if !(x.Op != OpAnd8 && x.Op != OpConst8) { break } v.reset(OpAnd8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v.AddArg(l) + v.AddArg(x) return true } - // match: (And8 (And8 z i:(Const8 )) x) + // match: (And8 (And8 i:(Const8 ) z) x) // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (And8 i (And8 z x)) for { @@ -4838,12 +2776,12 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { if v_0.Op != OpAnd8 { break } - z := v_0.Args[0] - i := v_0.Args[1] + i := v_0.Args[0] if i.Op != OpConst8 { break } t := i.Type + z := v_0.Args[1] x := v.Args[1] if !(z.Op != OpConst8 && x.Op != OpConst8) { 
break @@ -4856,171 +2794,29 @@ func rewriteValuegeneric_OpAnd8(v *Value) bool { v.AddArg(v0) return true } - // match: (And8 x (And8 i:(Const8 ) z)) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (And8 i (And8 z x)) + // match: (And8 (Const8 [c]) (And8 (Const8 [d]) x)) + // cond: + // result: (And8 (Const8 [int64(int8(c&d))]) x) for { - x := v.Args[0] + v_0 := v.Args[0] + if v_0.Op != OpConst8 { + break + } + t := v_0.Type + c := v_0.AuxInt v_1 := v.Args[1] if v_1.Op != OpAnd8 { break } - i := v_1.Args[0] - if i.Op != OpConst8 { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst8 { break } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst8 && x.Op != OpConst8) { + if v_1_0.Type != t { break } - v.reset(OpAnd8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And8 x (And8 z i:(Const8 ))) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (And8 i (And8 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpAnd8 { - break - } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpAnd8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpAnd8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (And8 (Const8 [c]) (And8 (Const8 [d]) x)) - // cond: - // result: (And8 (Const8 [int64(int8(c&d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd8 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 { - break - } - if v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - x := v_1.Args[1] - v.reset(OpAnd8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And8 (Const8 [c]) (And8 x (Const8 [d]))) - // cond: - // result: (And8 (Const8 [int64(int8(c&d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAnd8 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 { - break - } - if v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAnd8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And8 (And8 (Const8 [d]) x) (Const8 [c])) - // cond: - // result: (And8 (Const8 [int64(int8(c&d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd8 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAnd8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c & d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (And8 (And8 x (Const8 [d])) (Const8 [c])) - // cond: - // result: (And8 (Const8 [int64(int8(c&d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd8 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt + d := v_1_0.AuxInt + x := v_1.Args[1] v.reset(OpAnd8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c & d)) @@ -5379,7 +3175,7 @@ func 
rewriteValuegeneric_OpCom64(v *Value) bool { return false } func rewriteValuegeneric_OpCom8(v *Value) bool { - // match: (Com8 (Com8 x)) + // match: (Com8 (Com8 x)) // cond: // result: x for { @@ -5642,7 +3438,7 @@ func rewriteValuegeneric_OpDiv16(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Div16 (Const16 [c]) (Const16 [d])) + // match: (Div16 (Const16 [c]) (Const16 [d])) // cond: d != 0 // result: (Const16 [int64(int16(c)/int16(d))]) for { @@ -5793,7 +3589,7 @@ func rewriteValuegeneric_OpDiv16u(v *Value) bool { _ = config types := &b.Func.Config.Types _ = types - // match: (Div16u (Const16 [c]) (Const16 [d])) + // match: (Div16u (Const16 [c]) (Const16 [d])) // cond: d != 0 // result: (Const16 [int64(int16(uint16(c)/uint16(d)))]) for { @@ -5974,7 +3770,7 @@ func rewriteValuegeneric_OpDiv32(v *Value) bool { _ = config types := &b.Func.Config.Types _ = types - // match: (Div32 (Const32 [c]) (Const32 [d])) + // match: (Div32 (Const32 [c]) (Const32 [d])) // cond: d != 0 // result: (Const32 [int64(int32(c)/int32(d))]) for { @@ -6252,7 +4048,7 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool { _ = config types := &b.Func.Config.Types _ = types - // match: (Div32u (Const32 [c]) (Const32 [d])) + // match: (Div32u (Const32 [c]) (Const32 [d])) // cond: d != 0 // result: (Const32 [int64(int32(uint32(c)/uint32(d)))]) for { @@ -6488,7 +4284,7 @@ func rewriteValuegeneric_OpDiv64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Div64 (Const64 [c]) (Const64 [d])) + // match: (Div64 (Const64 [c]) (Const64 [d])) // cond: d != 0 // result: (Const64 [c/d]) for { @@ -6727,7 +4523,7 @@ func rewriteValuegeneric_OpDiv64u(v *Value) bool { _ = config types := &b.Func.Config.Types _ = types - // match: (Div64u (Const64 [c]) (Const64 [d])) + // match: (Div64u (Const64 [c]) (Const64 [d])) // cond: d != 0 // result: (Const64 [int64(uint64(c)/uint64(d))]) for { @@ -6861,7 +4657,7 @@ func rewriteValuegeneric_OpDiv8(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Div8 (Const8 [c]) (Const8 [d])) + // match: (Div8 (Const8 [c]) (Const8 [d])) // cond: d != 0 // result: (Const8 [int64(int8(c)/int8(d))]) for { @@ -6882,7 +4678,7 @@ func rewriteValuegeneric_OpDiv8(v *Value) bool { v.AuxInt = int64(int8(c) / int8(d)) return true } - // match: (Div8 n (Const8 [c])) + // match: (Div8 n (Const8 [c])) // cond: c < 0 && c != -1<<7 // result: (Neg8 (Div8 n (Const8 [-c]))) for { @@ -6905,7 +4701,7 @@ func rewriteValuegeneric_OpDiv8(v *Value) bool { v.AddArg(v0) return true } - // match: (Div8 x (Const8 [-1<<7 ])) + // match: (Div8 x (Const8 [-1<<7 ])) // cond: // result: (Rsh8Ux64 (And8 x (Neg8 x)) (Const64 [7 ])) for { @@ -6930,7 +4726,7 @@ func rewriteValuegeneric_OpDiv8(v *Value) bool { v.AddArg(v2) return true } - // match: (Div8 n (Const8 [c])) + // match: (Div8 n (Const8 [c])) // cond: isPowerOfTwo(c) // result: (Rsh8x64 (Add8 n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [ 8-log2(c)]))) (Const64 [log2(c)])) for { @@ -7010,7 +4806,7 @@ func rewriteValuegeneric_OpDiv8u(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Div8u (Const8 [c]) (Const8 [d])) + // match: (Div8u (Const8 [c]) (Const8 [d])) // cond: d != 0 // result: (Const8 [int64(int8(uint8(c)/uint8(d)))]) for { @@ -7031,7 +4827,7 @@ func rewriteValuegeneric_OpDiv8u(v *Value) bool { v.AuxInt = int64(int8(uint8(c) / uint8(d))) return true } - // match: (Div8u n (Const8 [c])) + // match: (Div8u n (Const8 [c])) // cond: isPowerOfTwo(c&0xff) // result: (Rsh8Ux64 
n (Const64 [log2(c&0xff)])) for { @@ -7127,92 +4923,23 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { v.AddArg(x) return true } - // match: (Eq16 (Const16 [c]) (Add16 x (Const16 [d]))) - // cond: - // result: (Eq16 (Const16 [int64(int16(c-d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd16 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - break - } - if v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpEq16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq16 (Add16 (Const16 [d]) x) (Const16 [c])) - // cond: - // result: (Eq16 (Const16 [int64(int16(c-d))]) x) + // match: (Eq16 x (Const16 [c])) + // cond: x.Op != OpConst16 + // result: (Eq16 (Const16 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst16 { break } - if v_1.Type != t { - break - } + t := v_1.Type c := v_1.AuxInt - v.reset(OpEq16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq16 (Add16 x (Const16 [d])) (Const16 [c])) - // cond: - // result: (Eq16 (Const16 [int64(int16(c-d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.Type != t { + if !(x.Op != OpConst16) { break } - c := v_1.AuxInt v.reset(OpEq16) v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c - d)) + v0.AuxInt = c v.AddArg(v0) v.AddArg(x) return true @@ -7235,24 +4962,6 @@ func rewriteValuegeneric_OpEq16(v *Value) bool { v.AuxInt = b2i(c == d) return true } - // match: (Eq16 (Const16 [d]) (Const16 [c])) - // cond: - // result: (ConstBool [b2i(c == d)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true - } return false } func rewriteValuegeneric_OpEq32(v *Value) bool { @@ -7300,102 +5009,33 @@ func rewriteValuegeneric_OpEq32(v *Value) bool { v.AddArg(x) return true } - // match: (Eq32 (Const32 [c]) (Add32 x (Const32 [d]))) - // cond: - // result: (Eq32 (Const32 [int64(int32(c-d))]) x) + // match: (Eq32 x (Const32 [c])) + // cond: x.Op != OpConst32 + // result: (Eq32 (Const32 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c := v_0.AuxInt + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpAdd32 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { + if v_1.Op != OpConst32 { break } - if v_1_1.Type != t { + t := v_1.Type + c := v_1.AuxInt + if !(x.Op != OpConst32) { break } - d := v_1_1.AuxInt v.reset(OpEq32) v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c - d)) + v0.AuxInt = c v.AddArg(v0) v.AddArg(x) return true } - // match: (Eq32 (Add32 (Const32 [d]) x) (Const32 [c])) + // match: (Eq32 (Const32 [c]) (Const32 [d])) // cond: - // result: (Eq32 (Const32 [int64(int32(c-d))]) x) + // result: (ConstBool [b2i(c == d)]) for { v_0 := v.Args[0] - if v_0.Op != OpAdd32 { - break - } 
- v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq32 (Add32 x (Const32 [d])) (Const32 [c])) - // cond: - // result: (Eq32 (Const32 [int64(int32(c-d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd32 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpEq32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq32 (Const32 [c]) (Const32 [d])) - // cond: - // result: (ConstBool [b2i(c == d)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { + if v_0.Op != OpConst32 { break } c := v_0.AuxInt @@ -7408,24 +5048,6 @@ func rewriteValuegeneric_OpEq32(v *Value) bool { v.AuxInt = b2i(c == d) return true } - // match: (Eq32 (Const32 [d]) (Const32 [c])) - // cond: - // result: (ConstBool [b2i(c == d)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true - } return false } func rewriteValuegeneric_OpEq64(v *Value) bool { @@ -7473,92 +5095,23 @@ func rewriteValuegeneric_OpEq64(v *Value) bool { v.AddArg(x) return true } - // match: (Eq64 (Const64 [c]) (Add64 x (Const64 [d]))) - // cond: - // result: (Eq64 (Const64 [c-d]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd64 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - if v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpEq64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq64 (Add64 (Const64 [d]) x) (Const64 [c])) - // cond: - // result: (Eq64 (Const64 [c-d]) x) + // match: (Eq64 x (Const64 [c])) + // cond: x.Op != OpConst64 + // result: (Eq64 (Const64 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst64 { break } - if v_1.Type != t { - break - } + t := v_1.Type c := v_1.AuxInt - v.reset(OpEq64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq64 (Add64 x (Const64 [d])) (Const64 [c])) - // cond: - // result: (Eq64 (Const64 [c-d]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.Type != t { + if !(x.Op != OpConst64) { break } - c := v_1.AuxInt v.reset(OpEq64) v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d + v0.AuxInt = c v.AddArg(v0) v.AddArg(x) return true @@ -7581,30 +5134,12 @@ func rewriteValuegeneric_OpEq64(v *Value) bool { v.AuxInt = b2i(c == d) return true 
} - // match: (Eq64 (Const64 [d]) (Const64 [c])) - // cond: - // result: (ConstBool [b2i(c == d)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true - } return false } func rewriteValuegeneric_OpEq8(v *Value) bool { b := v.Block _ = b - // match: (Eq8 x x) + // match: (Eq8 x x) // cond: // result: (ConstBool [1]) for { @@ -7616,7 +5151,7 @@ func rewriteValuegeneric_OpEq8(v *Value) bool { v.AuxInt = 1 return true } - // match: (Eq8 (Const8 [c]) (Add8 (Const8 [d]) x)) + // match: (Eq8 (Const8 [c]) (Add8 (Const8 [d]) x)) // cond: // result: (Eq8 (Const8 [int64(int8(c-d))]) x) for { @@ -7646,97 +5181,28 @@ func rewriteValuegeneric_OpEq8(v *Value) bool { v.AddArg(x) return true } - // match: (Eq8 (Const8 [c]) (Add8 x (Const8 [d]))) - // cond: - // result: (Eq8 (Const8 [int64(int8(c-d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd8 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 { - break - } - if v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpEq8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq8 (Add8 (Const8 [d]) x) (Const8 [c])) - // cond: - // result: (Eq8 (Const8 [int64(int8(c-d))]) x) + // match: (Eq8 x (Const8 [c])) + // cond: x.Op != OpConst8 + // result: (Eq8 (Const8 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst8 { break } - if v_1.Type != t { - break - } + t := v_1.Type c := v_1.AuxInt - v.reset(OpEq8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Eq8 (Add8 x (Const8 [d])) (Const8 [c])) - // cond: - // result: (Eq8 (Const8 [int64(int8(c-d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.Type != t { + if !(x.Op != OpConst8) { break } - c := v_1.AuxInt v.reset(OpEq8) v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) + v0.AuxInt = c v.AddArg(v0) v.AddArg(x) return true } - // match: (Eq8 (Const8 [c]) (Const8 [d])) + // match: (Eq8 (Const8 [c]) (Const8 [d])) // cond: // result: (ConstBool [b2i(c == d)]) for { @@ -7754,24 +5220,6 @@ func rewriteValuegeneric_OpEq8(v *Value) bool { v.AuxInt = b2i(c == d) return true } - // match: (Eq8 (Const8 [d]) (Const8 [c])) - // cond: - // result: (ConstBool [b2i(c == d)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c == d) - return true - } return false } func rewriteValuegeneric_OpEqB(v *Value) bool { @@ -7918,28 +5366,6 @@ func rewriteValuegeneric_OpEqPtr(v *Value) bool { v.AuxInt = b2i(a == b) return true } - // match: (EqPtr (Addr {b} x) (Addr {a} x)) - // cond: - // result: (ConstBool [b2i(a == b)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAddr { - break - } - b := v_0.Aux - x := v_0.Args[0] 
- v_1 := v.Args[1] - if v_1.Op != OpAddr { - break - } - a := v_1.Aux - if x != v_1.Args[0] { - break - } - v.reset(OpConstBool) - v.AuxInt = b2i(a == b) - return true - } return false } func rewriteValuegeneric_OpEqSlice(v *Value) bool { @@ -8090,7 +5516,7 @@ func rewriteValuegeneric_OpGeq64U(v *Value) bool { return false } func rewriteValuegeneric_OpGeq8(v *Value) bool { - // match: (Geq8 (Const8 [c]) (Const8 [d])) + // match: (Geq8 (Const8 [c]) (Const8 [d])) // cond: // result: (ConstBool [b2i(c >= d)]) for { @@ -8111,7 +5537,7 @@ func rewriteValuegeneric_OpGeq8(v *Value) bool { return false } func rewriteValuegeneric_OpGeq8U(v *Value) bool { - // match: (Geq8U (Const8 [c]) (Const8 [d])) + // match: (Geq8U (Const8 [c]) (Const8 [d])) // cond: // result: (ConstBool [b2i(uint8(c) >= uint8(d))]) for { @@ -8258,7 +5684,7 @@ func rewriteValuegeneric_OpGreater64U(v *Value) bool { return false } func rewriteValuegeneric_OpGreater8(v *Value) bool { - // match: (Greater8 (Const8 [c]) (Const8 [d])) + // match: (Greater8 (Const8 [c]) (Const8 [d])) // cond: // result: (ConstBool [b2i(c > d)]) for { @@ -8279,7 +5705,7 @@ func rewriteValuegeneric_OpGreater8(v *Value) bool { return false } func rewriteValuegeneric_OpGreater8U(v *Value) bool { - // match: (Greater8U (Const8 [c]) (Const8 [d])) + // match: (Greater8U (Const8 [c]) (Const8 [d])) // cond: // result: (ConstBool [b2i(uint8(c) > uint8(d))]) for { @@ -8377,7 +5803,7 @@ func rewriteValuegeneric_OpInterCall(v *Value) bool { return false } func rewriteValuegeneric_OpIsInBounds(v *Value) bool { - // match: (IsInBounds (ZeroExt8to32 _) (Const32 [c])) + // match: (IsInBounds (ZeroExt8to32 _) (Const32 [c])) // cond: (1 << 8) <= c // result: (ConstBool [1]) for { @@ -8397,7 +5823,7 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { v.AuxInt = 1 return true } - // match: (IsInBounds (ZeroExt8to64 _) (Const64 [c])) + // match: (IsInBounds (ZeroExt8to64 _) (Const64 [c])) // cond: (1 << 8) <= c // result: (ConstBool [1]) for { @@ -8469,7 +5895,7 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { v.AuxInt = 0 return true } - // match: (IsInBounds (And8 (Const8 [c]) _) (Const8 [d])) + // match: (IsInBounds (And8 (Const8 [c]) _) (Const8 [d])) // cond: 0 <= c && c < d // result: (ConstBool [1]) for { @@ -8494,32 +5920,7 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { v.AuxInt = 1 return true } - // match: (IsInBounds (And8 _ (Const8 [c])) (Const8 [d])) - // cond: 0 <= c && c < d - // result: (ConstBool [1]) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd8 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - c := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (IsInBounds (ZeroExt8to16 (And8 (Const8 [c]) _)) (Const16 [d])) + // match: (IsInBounds (ZeroExt8to16 (And8 (Const8 [c]) _)) (Const16 [d])) // cond: 0 <= c && c < d // result: (ConstBool [1]) for { @@ -8548,25 +5949,25 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { v.AuxInt = 1 return true } - // match: (IsInBounds (ZeroExt8to16 (And8 _ (Const8 [c]))) (Const16 [d])) + // match: (IsInBounds (ZeroExt8to32 (And8 (Const8 [c]) _)) (Const32 [d])) // cond: 0 <= c && c < d // result: (ConstBool [1]) for { v_0 := v.Args[0] - if v_0.Op != OpZeroExt8to16 { + if v_0.Op != OpZeroExt8to32 { break } v_0_0 := v_0.Args[0] if v_0_0.Op != OpAnd8 { break } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst8 { + v_0_0_0 
:= v_0_0.Args[0] + if v_0_0_0.Op != OpConst8 { break } - c := v_0_0_1.AuxInt + c := v_0_0_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst16 { + if v_1.Op != OpConst32 { break } d := v_1.AuxInt @@ -8577,12 +5978,12 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { v.AuxInt = 1 return true } - // match: (IsInBounds (ZeroExt8to32 (And8 (Const8 [c]) _)) (Const32 [d])) + // match: (IsInBounds (ZeroExt8to64 (And8 (Const8 [c]) _)) (Const64 [d])) // cond: 0 <= c && c < d // result: (ConstBool [1]) for { v_0 := v.Args[0] - if v_0.Op != OpZeroExt8to32 { + if v_0.Op != OpZeroExt8to64 { break } v_0_0 := v_0.Args[0] @@ -8595,7 +5996,7 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { } c := v_0_0_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_1.Op != OpConst64 { break } d := v_1.AuxInt @@ -8606,25 +6007,21 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { v.AuxInt = 1 return true } - // match: (IsInBounds (ZeroExt8to32 (And8 _ (Const8 [c]))) (Const32 [d])) + // match: (IsInBounds (And16 (Const16 [c]) _) (Const16 [d])) // cond: 0 <= c && c < d // result: (ConstBool [1]) for { v_0 := v.Args[0] - if v_0.Op != OpZeroExt8to32 { + if v_0.Op != OpAnd16 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAnd8 { - break - } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst8 { + if v_0_0.Op != OpConst16 { break } - c := v_0_0_1.AuxInt + c := v_0_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_1.Op != OpConst16 { break } d := v_1.AuxInt @@ -8635,25 +6032,25 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { v.AuxInt = 1 return true } - // match: (IsInBounds (ZeroExt8to64 (And8 (Const8 [c]) _)) (Const64 [d])) + // match: (IsInBounds (ZeroExt16to32 (And16 (Const16 [c]) _)) (Const32 [d])) // cond: 0 <= c && c < d // result: (ConstBool [1]) for { v_0 := v.Args[0] - if v_0.Op != OpZeroExt8to64 { + if v_0.Op != OpZeroExt16to32 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAnd8 { + if v_0_0.Op != OpAnd16 { break } v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpConst8 { + if v_0_0_0.Op != OpConst16 { break } c := v_0_0_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpConst32 { break } d := v_1.AuxInt @@ -8664,23 +6061,23 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { v.AuxInt = 1 return true } - // match: (IsInBounds (ZeroExt8to64 (And8 _ (Const8 [c]))) (Const64 [d])) + // match: (IsInBounds (ZeroExt16to64 (And16 (Const16 [c]) _)) (Const64 [d])) // cond: 0 <= c && c < d // result: (ConstBool [1]) for { v_0 := v.Args[0] - if v_0.Op != OpZeroExt8to64 { + if v_0.Op != OpZeroExt16to64 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAnd8 { + if v_0_0.Op != OpAnd16 { break } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst8 { + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpConst16 { break } - c := v_0_0_1.AuxInt + c := v_0_0_0.AuxInt v_1 := v.Args[1] if v_1.Op != OpConst64 { break @@ -8693,21 +6090,21 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { v.AuxInt = 1 return true } - // match: (IsInBounds (And16 (Const16 [c]) _) (Const16 [d])) + // match: (IsInBounds (And32 (Const32 [c]) _) (Const32 [d])) // cond: 0 <= c && c < d // result: (ConstBool [1]) for { v_0 := v.Args[0] - if v_0.Op != OpAnd16 { + if v_0.Op != OpAnd32 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { + if v_0_0.Op != OpConst32 { break } c := v_0_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst16 { + if v_1.Op != OpConst32 { break } d := v_1.AuxInt @@ -8718,21 +6115,25 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { v.AuxInt = 1 
return true } - // match: (IsInBounds (And16 _ (Const16 [c])) (Const16 [d])) + // match: (IsInBounds (ZeroExt32to64 (And32 (Const32 [c]) _)) (Const64 [d])) // cond: 0 <= c && c < d // result: (ConstBool [1]) for { v_0 := v.Args[0] - if v_0.Op != OpAnd16 { + if v_0.Op != OpZeroExt32to64 { break } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpAnd32 { break } - c := v_0_1.AuxInt + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpConst32 { + break + } + c := v_0_0_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst16 { + if v_1.Op != OpConst64 { break } d := v_1.AuxInt @@ -8743,25 +6144,21 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { v.AuxInt = 1 return true } - // match: (IsInBounds (ZeroExt16to32 (And16 (Const16 [c]) _)) (Const32 [d])) + // match: (IsInBounds (And64 (Const64 [c]) _) (Const64 [d])) // cond: 0 <= c && c < d // result: (ConstBool [1]) for { v_0 := v.Args[0] - if v_0.Op != OpZeroExt16to32 { + if v_0.Op != OpAnd64 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAnd16 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpConst16 { + if v_0_0.Op != OpConst64 { break } - c := v_0_0_0.AuxInt + c := v_0_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_1.Op != OpConst64 { break } d := v_1.AuxInt @@ -8772,95 +6169,106 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { v.AuxInt = 1 return true } - // match: (IsInBounds (ZeroExt16to32 (And16 _ (Const16 [c]))) (Const32 [d])) - // cond: 0 <= c && c < d - // result: (ConstBool [1]) + // match: (IsInBounds (Const32 [c]) (Const32 [d])) + // cond: + // result: (ConstBool [b2i(0 <= c && c < d)]) for { v_0 := v.Args[0] - if v_0.Op != OpZeroExt16to32 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAnd16 { - break - } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst16 { + if v_0.Op != OpConst32 { break } - c := v_0_0_1.AuxInt + c := v_0.AuxInt v_1 := v.Args[1] if v_1.Op != OpConst32 { break } d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } v.reset(OpConstBool) - v.AuxInt = 1 + v.AuxInt = b2i(0 <= c && c < d) return true } - // match: (IsInBounds (ZeroExt16to64 (And16 (Const16 [c]) _)) (Const64 [d])) - // cond: 0 <= c && c < d - // result: (ConstBool [1]) + // match: (IsInBounds (Const64 [c]) (Const64 [d])) + // cond: + // result: (ConstBool [b2i(0 <= c && c < d)]) for { v_0 := v.Args[0] - if v_0.Op != OpZeroExt16to64 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAnd16 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpConst16 { + if v_0.Op != OpConst64 { break } - c := v_0_0_0.AuxInt + c := v_0.AuxInt v_1 := v.Args[1] if v_1.Op != OpConst64 { break } d := v_1.AuxInt - if !(0 <= c && c < d) { + v.reset(OpConstBool) + v.AuxInt = b2i(0 <= c && c < d) + return true + } + // match: (IsInBounds (Mod32u _ y) y) + // cond: + // result: (ConstBool [1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpMod32u { + break + } + y := v_0.Args[1] + if y != v.Args[1] { break } v.reset(OpConstBool) v.AuxInt = 1 return true } - // match: (IsInBounds (ZeroExt16to64 (And16 _ (Const16 [c]))) (Const64 [d])) - // cond: 0 <= c && c < d + // match: (IsInBounds (Mod64u _ y) y) + // cond: // result: (ConstBool [1]) for { v_0 := v.Args[0] - if v_0.Op != OpZeroExt16to64 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAnd16 { + if v_0.Op != OpMod64u { break } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst16 { + y := v_0.Args[1] + if y != v.Args[1] { break } - c := v_0_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + v.reset(OpConstBool) + 
v.AuxInt = 1 + return true + } + return false +} +func rewriteValuegeneric_OpIsNonNil(v *Value) bool { + // match: (IsNonNil (ConstNil)) + // cond: + // result: (ConstBool [0]) + for { + v_0 := v.Args[0] + if v_0.Op != OpConstNil { break } - d := v_1.AuxInt - if !(0 <= c && c < d) { + v.reset(OpConstBool) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValuegeneric_OpIsSliceInBounds(v *Value) bool { + // match: (IsSliceInBounds x x) + // cond: + // result: (ConstBool [1]) + for { + x := v.Args[0] + if x != v.Args[1] { break } v.reset(OpConstBool) v.AuxInt = 1 return true } - // match: (IsInBounds (And32 (Const32 [c]) _) (Const32 [d])) - // cond: 0 <= c && c < d + // match: (IsSliceInBounds (And32 (Const32 [c]) _) (Const32 [d])) + // cond: 0 <= c && c <= d // result: (ConstBool [1]) for { v_0 := v.Args[0] @@ -8877,499 +6285,361 @@ func rewriteValuegeneric_OpIsInBounds(v *Value) bool { break } d := v_1.AuxInt - if !(0 <= c && c < d) { + if !(0 <= c && c <= d) { break } v.reset(OpConstBool) v.AuxInt = 1 return true } - // match: (IsInBounds (And32 _ (Const32 [c])) (Const32 [d])) - // cond: 0 <= c && c < d + // match: (IsSliceInBounds (And64 (Const64 [c]) _) (Const64 [d])) + // cond: 0 <= c && c <= d // result: (ConstBool [1]) for { v_0 := v.Args[0] - if v_0.Op != OpAnd32 { + if v_0.Op != OpAnd64 { break } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst64 { break } - c := v_0_1.AuxInt + c := v_0_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_1.Op != OpConst64 { break } d := v_1.AuxInt - if !(0 <= c && c < d) { + if !(0 <= c && c <= d) { break } v.reset(OpConstBool) v.AuxInt = 1 return true } - // match: (IsInBounds (ZeroExt32to64 (And32 (Const32 [c]) _)) (Const64 [d])) - // cond: 0 <= c && c < d + // match: (IsSliceInBounds (Const32 [0]) _) + // cond: // result: (ConstBool [1]) for { v_0 := v.Args[0] - if v_0.Op != OpZeroExt32to64 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAnd32 { - break - } - v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpConst32 { - break - } - c := v_0_0_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_0.Op != OpConst32 { break } - d := v_1.AuxInt - if !(0 <= c && c < d) { + if v_0.AuxInt != 0 { break } v.reset(OpConstBool) v.AuxInt = 1 return true } - // match: (IsInBounds (ZeroExt32to64 (And32 _ (Const32 [c]))) (Const64 [d])) - // cond: 0 <= c && c < d + // match: (IsSliceInBounds (Const64 [0]) _) + // cond: // result: (ConstBool [1]) for { v_0 := v.Args[0] - if v_0.Op != OpZeroExt32to64 { + if v_0.Op != OpConst64 { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpAnd32 { + if v_0.AuxInt != 0 { break } - v_0_0_1 := v_0_0.Args[1] - if v_0_0_1.Op != OpConst32 { + v.reset(OpConstBool) + v.AuxInt = 1 + return true + } + // match: (IsSliceInBounds (Const32 [c]) (Const32 [d])) + // cond: + // result: (ConstBool [b2i(0 <= c && c <= d)]) + for { + v_0 := v.Args[0] + if v_0.Op != OpConst32 { break } - c := v_0_0_1.AuxInt + c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpConst32 { break } d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } v.reset(OpConstBool) - v.AuxInt = 1 + v.AuxInt = b2i(0 <= c && c <= d) return true } - // match: (IsInBounds (And64 (Const64 [c]) _) (Const64 [d])) - // cond: 0 <= c && c < d - // result: (ConstBool [1]) + // match: (IsSliceInBounds (Const64 [c]) (Const64 [d])) + // cond: + // result: (ConstBool [b2i(0 <= c && c <= d)]) for { v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - v_0_0 := v_0.Args[0] - if 
v_0_0.Op != OpConst64 { + if v_0.Op != OpConst64 { break } - c := v_0_0.AuxInt + c := v_0.AuxInt v_1 := v.Args[1] if v_1.Op != OpConst64 { break } d := v_1.AuxInt - if !(0 <= c && c < d) { - break - } v.reset(OpConstBool) - v.AuxInt = 1 + v.AuxInt = b2i(0 <= c && c <= d) return true } - // match: (IsInBounds (And64 _ (Const64 [c])) (Const64 [d])) - // cond: 0 <= c && c < d + // match: (IsSliceInBounds (SliceLen x) (SliceCap x)) + // cond: // result: (ConstBool [1]) for { v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { + if v_0.Op != OpSliceLen { break } - c := v_0_1.AuxInt + x := v_0.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpSliceCap { break } - d := v_1.AuxInt - if !(0 <= c && c < d) { + if x != v_1.Args[0] { break } v.reset(OpConstBool) v.AuxInt = 1 return true } - // match: (IsInBounds (Const32 [c]) (Const32 [d])) + return false +} +func rewriteValuegeneric_OpLeq16(v *Value) bool { + // match: (Leq16 (Const16 [c]) (Const16 [d])) // cond: - // result: (ConstBool [b2i(0 <= c && c < d)]) + // result: (ConstBool [b2i(c <= d)]) for { v_0 := v.Args[0] - if v_0.Op != OpConst32 { + if v_0.Op != OpConst16 { break } c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_1.Op != OpConst16 { break } d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(0 <= c && c < d) + v.AuxInt = b2i(c <= d) return true } - // match: (IsInBounds (Const64 [c]) (Const64 [d])) + return false +} +func rewriteValuegeneric_OpLeq16U(v *Value) bool { + // match: (Leq16U (Const16 [c]) (Const16 [d])) // cond: - // result: (ConstBool [b2i(0 <= c && c < d)]) + // result: (ConstBool [b2i(uint16(c) <= uint16(d))]) for { v_0 := v.Args[0] - if v_0.Op != OpConst64 { + if v_0.Op != OpConst16 { break } c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpConst16 { break } d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(0 <= c && c < d) + v.AuxInt = b2i(uint16(c) <= uint16(d)) return true } - // match: (IsInBounds (Mod32u _ y) y) + return false +} +func rewriteValuegeneric_OpLeq32(v *Value) bool { + // match: (Leq32 (Const32 [c]) (Const32 [d])) // cond: - // result: (ConstBool [1]) + // result: (ConstBool [b2i(c <= d)]) for { v_0 := v.Args[0] - if v_0.Op != OpMod32u { + if v_0.Op != OpConst32 { break } - y := v_0.Args[1] - if y != v.Args[1] { + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32 { break } + d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = 1 + v.AuxInt = b2i(c <= d) return true } - // match: (IsInBounds (Mod64u _ y) y) + return false +} +func rewriteValuegeneric_OpLeq32U(v *Value) bool { + // match: (Leq32U (Const32 [c]) (Const32 [d])) // cond: - // result: (ConstBool [1]) + // result: (ConstBool [b2i(uint32(c) <= uint32(d))]) for { v_0 := v.Args[0] - if v_0.Op != OpMod64u { + if v_0.Op != OpConst32 { break } - y := v_0.Args[1] - if y != v.Args[1] { + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32 { break } + d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = 1 + v.AuxInt = b2i(uint32(c) <= uint32(d)) return true } return false } -func rewriteValuegeneric_OpIsNonNil(v *Value) bool { - // match: (IsNonNil (ConstNil)) +func rewriteValuegeneric_OpLeq64(v *Value) bool { + // match: (Leq64 (Const64 [c]) (Const64 [d])) // cond: - // result: (ConstBool [0]) + // result: (ConstBool [b2i(c <= d)]) for { v_0 := v.Args[0] - if v_0.Op != OpConstNil { + if v_0.Op != OpConst64 { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64 { break } + d := v_1.AuxInt 
v.reset(OpConstBool) - v.AuxInt = 0 + v.AuxInt = b2i(c <= d) return true } return false } -func rewriteValuegeneric_OpIsSliceInBounds(v *Value) bool { - // match: (IsSliceInBounds x x) +func rewriteValuegeneric_OpLeq64U(v *Value) bool { + // match: (Leq64U (Const64 [c]) (Const64 [d])) // cond: - // result: (ConstBool [1]) + // result: (ConstBool [b2i(uint64(c) <= uint64(d))]) for { - x := v.Args[0] - if x != v.Args[1] { + v_0 := v.Args[0] + if v_0.Op != OpConst64 { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64 { break } + d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = 1 + v.AuxInt = b2i(uint64(c) <= uint64(d)) return true } - // match: (IsSliceInBounds (And32 (Const32 [c]) _) (Const32 [d])) - // cond: 0 <= c && c <= d - // result: (ConstBool [1]) + return false +} +func rewriteValuegeneric_OpLeq8(v *Value) bool { + // match: (Leq8 (Const8 [c]) (Const8 [d])) + // cond: + // result: (ConstBool [b2i(c <= d)]) for { v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { + if v_0.Op != OpConst8 { break } - c := v_0_0.AuxInt + c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_1.Op != OpConst8 { break } d := v_1.AuxInt - if !(0 <= c && c <= d) { - break - } v.reset(OpConstBool) - v.AuxInt = 1 + v.AuxInt = b2i(c <= d) return true } - // match: (IsSliceInBounds (And32 _ (Const32 [c])) (Const32 [d])) - // cond: 0 <= c && c <= d - // result: (ConstBool [1]) + return false +} +func rewriteValuegeneric_OpLeq8U(v *Value) bool { + // match: (Leq8U (Const8 [c]) (Const8 [d])) + // cond: + // result: (ConstBool [b2i(uint8(c) <= uint8(d))]) for { v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { + if v_0.Op != OpConst8 { break } - c := v_0_1.AuxInt + c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_1.Op != OpConst8 { break } d := v_1.AuxInt - if !(0 <= c && c <= d) { - break - } v.reset(OpConstBool) - v.AuxInt = 1 + v.AuxInt = b2i(uint8(c) <= uint8(d)) return true } - // match: (IsSliceInBounds (And64 (Const64 [c]) _) (Const64 [d])) - // cond: 0 <= c && c <= d - // result: (ConstBool [1]) + return false +} +func rewriteValuegeneric_OpLess16(v *Value) bool { + // match: (Less16 (Const16 [c]) (Const16 [d])) + // cond: + // result: (ConstBool [b2i(c < d)]) for { v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { + if v_0.Op != OpConst16 { break } - c := v_0_0.AuxInt + c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpConst16 { break } d := v_1.AuxInt - if !(0 <= c && c <= d) { - break - } v.reset(OpConstBool) - v.AuxInt = 1 + v.AuxInt = b2i(c < d) return true } - // match: (IsSliceInBounds (And64 _ (Const64 [c])) (Const64 [d])) - // cond: 0 <= c && c <= d - // result: (ConstBool [1]) + return false +} +func rewriteValuegeneric_OpLess16U(v *Value) bool { + // match: (Less16U (Const16 [c]) (Const16 [d])) + // cond: + // result: (ConstBool [b2i(uint16(c) < uint16(d))]) for { v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { + if v_0.Op != OpConst16 { break } - c := v_0_1.AuxInt + c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpConst16 { break } d := v_1.AuxInt - if !(0 <= c && c <= d) { - break - } v.reset(OpConstBool) - v.AuxInt = 1 + v.AuxInt = b2i(uint16(c) < uint16(d)) return true } - // match: (IsSliceInBounds (Const32 [0]) _) + return false +} +func 
rewriteValuegeneric_OpLess32(v *Value) bool { + // match: (Less32 (Const32 [c]) (Const32 [d])) // cond: - // result: (ConstBool [1]) + // result: (ConstBool [b2i(c < d)]) for { v_0 := v.Args[0] if v_0.Op != OpConst32 { break } - if v_0.AuxInt != 0 { + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32 { break } + d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (IsSliceInBounds (Const64 [0]) _) - // cond: - // result: (ConstBool [1]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - if v_0.AuxInt != 0 { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - // match: (IsSliceInBounds (Const32 [c]) (Const32 [d])) - // cond: - // result: (ConstBool [b2i(0 <= c && c <= d)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(0 <= c && c <= d) - return true - } - // match: (IsSliceInBounds (Const64 [c]) (Const64 [d])) - // cond: - // result: (ConstBool [b2i(0 <= c && c <= d)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(0 <= c && c <= d) - return true - } - // match: (IsSliceInBounds (SliceLen x) (SliceCap x)) - // cond: - // result: (ConstBool [1]) - for { - v_0 := v.Args[0] - if v_0.Op != OpSliceLen { - break - } - x := v_0.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpSliceCap { - break - } - if x != v_1.Args[0] { - break - } - v.reset(OpConstBool) - v.AuxInt = 1 - return true - } - return false -} -func rewriteValuegeneric_OpLeq16(v *Value) bool { - // match: (Leq16 (Const16 [c]) (Const16 [d])) - // cond: - // result: (ConstBool [b2i(c <= d)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c <= d) - return true - } - return false -} -func rewriteValuegeneric_OpLeq16U(v *Value) bool { - // match: (Leq16U (Const16 [c]) (Const16 [d])) - // cond: - // result: (ConstBool [b2i(uint16(c) <= uint16(d))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(uint16(c) <= uint16(d)) - return true - } - return false -} -func rewriteValuegeneric_OpLeq32(v *Value) bool { - // match: (Leq32 (Const32 [c]) (Const32 [d])) - // cond: - // result: (ConstBool [b2i(c <= d)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c <= d) + v.AuxInt = b2i(c < d) return true } return false } -func rewriteValuegeneric_OpLeq32U(v *Value) bool { - // match: (Leq32U (Const32 [c]) (Const32 [d])) +func rewriteValuegeneric_OpLess32U(v *Value) bool { + // match: (Less32U (Const32 [c]) (Const32 [d])) // cond: - // result: (ConstBool [b2i(uint32(c) <= uint32(d))]) + // result: (ConstBool [b2i(uint32(c) < uint32(d))]) for { v_0 := v.Args[0] if v_0.Op != OpConst32 { @@ -9382,15 +6652,15 @@ func rewriteValuegeneric_OpLeq32U(v *Value) bool { } d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(uint32(c) <= uint32(d)) + v.AuxInt = b2i(uint32(c) < uint32(d)) return true } return false } -func 
rewriteValuegeneric_OpLeq64(v *Value) bool { - // match: (Leq64 (Const64 [c]) (Const64 [d])) +func rewriteValuegeneric_OpLess64(v *Value) bool { + // match: (Less64 (Const64 [c]) (Const64 [d])) // cond: - // result: (ConstBool [b2i(c <= d)]) + // result: (ConstBool [b2i(c < d)]) for { v_0 := v.Args[0] if v_0.Op != OpConst64 { @@ -9403,15 +6673,15 @@ func rewriteValuegeneric_OpLeq64(v *Value) bool { } d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(c <= d) + v.AuxInt = b2i(c < d) return true } return false } -func rewriteValuegeneric_OpLeq64U(v *Value) bool { - // match: (Leq64U (Const64 [c]) (Const64 [d])) +func rewriteValuegeneric_OpLess64U(v *Value) bool { + // match: (Less64U (Const64 [c]) (Const64 [d])) // cond: - // result: (ConstBool [b2i(uint64(c) <= uint64(d))]) + // result: (ConstBool [b2i(uint64(c) < uint64(d))]) for { v_0 := v.Args[0] if v_0.Op != OpConst64 { @@ -9424,15 +6694,15 @@ func rewriteValuegeneric_OpLeq64U(v *Value) bool { } d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(uint64(c) <= uint64(d)) + v.AuxInt = b2i(uint64(c) < uint64(d)) return true } return false } -func rewriteValuegeneric_OpLeq8(v *Value) bool { - // match: (Leq8 (Const8 [c]) (Const8 [d])) +func rewriteValuegeneric_OpLess8(v *Value) bool { + // match: (Less8 (Const8 [c]) (Const8 [d])) // cond: - // result: (ConstBool [b2i(c <= d)]) + // result: (ConstBool [b2i(c < d)]) for { v_0 := v.Args[0] if v_0.Op != OpConst8 { @@ -9445,15 +6715,15 @@ func rewriteValuegeneric_OpLeq8(v *Value) bool { } d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(c <= d) + v.AuxInt = b2i(c < d) return true } return false } -func rewriteValuegeneric_OpLeq8U(v *Value) bool { - // match: (Leq8U (Const8 [c]) (Const8 [d])) +func rewriteValuegeneric_OpLess8U(v *Value) bool { + // match: (Less8U (Const8 [c]) (Const8 [d])) // cond: - // result: (ConstBool [b2i(uint8(c) <= uint8(d))]) + // result: (ConstBool [b2i(uint8(c) < uint8(d))]) for { v_0 := v.Args[0] if v_0.Op != OpConst8 { @@ -9466,234 +6736,66 @@ func rewriteValuegeneric_OpLeq8U(v *Value) bool { } d := v_1.AuxInt v.reset(OpConstBool) - v.AuxInt = b2i(uint8(c) <= uint8(d)) + v.AuxInt = b2i(uint8(c) < uint8(d)) return true } return false } -func rewriteValuegeneric_OpLess16(v *Value) bool { - // match: (Less16 (Const16 [c]) (Const16 [d])) - // cond: - // result: (ConstBool [b2i(c < d)]) +func rewriteValuegeneric_OpLoad(v *Value) bool { + b := v.Block + _ = b + fe := b.Func.fe + _ = fe + // match: (Load p1 (Store {t2} p2 x _)) + // cond: isSamePtr(p1,p2) && t1.Compare(x.Type)==CMPeq && t1.Size() == t2.(Type).Size() + // result: x for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { + t1 := v.Type + p1 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpStore { break } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { + t2 := v_1.Aux + p2 := v_1.Args[0] + x := v_1.Args[1] + if !(isSamePtr(p1, p2) && t1.Compare(x.Type) == CMPeq && t1.Size() == t2.(Type).Size()) { break } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c < d) + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } - return false -} -func rewriteValuegeneric_OpLess16U(v *Value) bool { - // match: (Less16U (Const16 [c]) (Const16 [d])) - // cond: - // result: (ConstBool [b2i(uint16(c) < uint16(d))]) + // match: (Load _ _) + // cond: t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) + // result: (StructMake0) for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { + t := v.Type + if !(t.IsStruct() && 
t.NumFields() == 0 && fe.CanSSA(t)) { break } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(uint16(c) < uint16(d)) + v.reset(OpStructMake0) return true } - return false -} -func rewriteValuegeneric_OpLess32(v *Value) bool { - // match: (Less32 (Const32 [c]) (Const32 [d])) - // cond: - // result: (ConstBool [b2i(c < d)]) + // match: (Load ptr mem) + // cond: t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) + // result: (StructMake1 (Load (OffPtr [0] ptr) mem)) for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { + t := v.Type + ptr := v.Args[0] + mem := v.Args[1] + if !(t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)) { break } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c < d) - return true - } - return false -} -func rewriteValuegeneric_OpLess32U(v *Value) bool { - // match: (Less32U (Const32 [c]) (Const32 [d])) - // cond: - // result: (ConstBool [b2i(uint32(c) < uint32(d))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(uint32(c) < uint32(d)) - return true - } - return false -} -func rewriteValuegeneric_OpLess64(v *Value) bool { - // match: (Less64 (Const64 [c]) (Const64 [d])) - // cond: - // result: (ConstBool [b2i(c < d)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c < d) - return true - } - return false -} -func rewriteValuegeneric_OpLess64U(v *Value) bool { - // match: (Less64U (Const64 [c]) (Const64 [d])) - // cond: - // result: (ConstBool [b2i(uint64(c) < uint64(d))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(uint64(c) < uint64(d)) - return true - } - return false -} -func rewriteValuegeneric_OpLess8(v *Value) bool { - // match: (Less8 (Const8 [c]) (Const8 [d])) - // cond: - // result: (ConstBool [b2i(c < d)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c < d) - return true - } - return false -} -func rewriteValuegeneric_OpLess8U(v *Value) bool { - // match: (Less8U (Const8 [c]) (Const8 [d])) - // cond: - // result: (ConstBool [b2i(uint8(c) < uint8(d))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(uint8(c) < uint8(d)) - return true - } - return false -} -func rewriteValuegeneric_OpLoad(v *Value) bool { - b := v.Block - _ = b - fe := b.Func.fe - _ = fe - // match: (Load p1 (Store {t2} p2 x _)) - // cond: isSamePtr(p1,p2) && t1.Compare(x.Type)==CMPeq && t1.Size() == t2.(Type).Size() - // result: x - for { - t1 := v.Type - p1 := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpStore { - break - } - t2 := v_1.Aux - p2 := v_1.Args[0] - x := v_1.Args[1] - if !(isSamePtr(p1, p2) && t1.Compare(x.Type) == CMPeq && t1.Size() == t2.(Type).Size()) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Load _ _) - // cond: t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) - // result: 
(StructMake0) - for { - t := v.Type - if !(t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)) { - break - } - v.reset(OpStructMake0) - return true - } - // match: (Load ptr mem) - // cond: t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) - // result: (StructMake1 (Load (OffPtr [0] ptr) mem)) - for { - t := v.Type - ptr := v.Args[0] - mem := v.Args[1] - if !(t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)) { - break - } - v.reset(OpStructMake1) - v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0)) - v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) - v1.AuxInt = 0 - v1.AddArg(ptr) - v0.AddArg(v1) - v0.AddArg(mem) - v.AddArg(v0) + v.reset(OpStructMake1) + v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0)) + v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo()) + v1.AuxInt = 0 + v1.AddArg(ptr) + v0.AddArg(v1) + v0.AddArg(mem) + v.AddArg(v0) return true } // match: (Load ptr mem) @@ -9831,7 +6933,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool { func rewriteValuegeneric_OpLsh16x16(v *Value) bool { b := v.Block _ = b - // match: (Lsh16x16 x (Const16 [c])) + // match: (Lsh16x16 x (Const16 [c])) // cond: // result: (Lsh16x64 x (Const64 [int64(uint16(c))])) for { @@ -9849,7 +6951,7 @@ func rewriteValuegeneric_OpLsh16x16(v *Value) bool { v.AddArg(v0) return true } - // match: (Lsh16x16 (Const16 [0]) _) + // match: (Lsh16x16 (Const16 [0]) _) // cond: // result: (Const16 [0]) for { @@ -9869,7 +6971,7 @@ func rewriteValuegeneric_OpLsh16x16(v *Value) bool { func rewriteValuegeneric_OpLsh16x32(v *Value) bool { b := v.Block _ = b - // match: (Lsh16x32 x (Const32 [c])) + // match: (Lsh16x32 x (Const32 [c])) // cond: // result: (Lsh16x64 x (Const64 [int64(uint32(c))])) for { @@ -9887,7 +6989,7 @@ func rewriteValuegeneric_OpLsh16x32(v *Value) bool { v.AddArg(v0) return true } - // match: (Lsh16x32 (Const16 [0]) _) + // match: (Lsh16x32 (Const16 [0]) _) // cond: // result: (Const16 [0]) for { @@ -9909,7 +7011,7 @@ func rewriteValuegeneric_OpLsh16x64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh16x64 (Const16 [c]) (Const64 [d])) + // match: (Lsh16x64 (Const16 [c]) (Const64 [d])) // cond: // result: (Const16 [int64(int16(c) << uint64(d))]) for { @@ -9927,7 +7029,7 @@ func rewriteValuegeneric_OpLsh16x64(v *Value) bool { v.AuxInt = int64(int16(c) << uint64(d)) return true } - // match: (Lsh16x64 x (Const64 [0])) + // match: (Lsh16x64 x (Const64 [0])) // cond: // result: x for { @@ -9944,7 +7046,7 @@ func rewriteValuegeneric_OpLsh16x64(v *Value) bool { v.AddArg(x) return true } - // match: (Lsh16x64 (Const16 [0]) _) + // match: (Lsh16x64 (Const16 [0]) _) // cond: // result: (Const16 [0]) for { @@ -9959,7 +7061,7 @@ func rewriteValuegeneric_OpLsh16x64(v *Value) bool { v.AuxInt = 0 return true } - // match: (Lsh16x64 _ (Const64 [c])) + // match: (Lsh16x64 _ (Const64 [c])) // cond: uint64(c) >= 16 // result: (Const16 [0]) for { @@ -10048,7 +7150,7 @@ func rewriteValuegeneric_OpLsh16x64(v *Value) bool { func rewriteValuegeneric_OpLsh16x8(v *Value) bool { b := v.Block _ = b - // match: (Lsh16x8 x (Const8 [c])) + // match: (Lsh16x8 x (Const8 [c])) // cond: // result: (Lsh16x64 x (Const64 [int64(uint8(c))])) for { @@ -10066,7 +7168,7 @@ func rewriteValuegeneric_OpLsh16x8(v *Value) bool { v.AddArg(v0) return true } - // match: (Lsh16x8 (Const16 [0]) _) + // match: (Lsh16x8 (Const16 [0]) _) // cond: // result: (Const16 [0]) for { @@ -10086,7 +7188,7 @@ func rewriteValuegeneric_OpLsh16x8(v *Value) bool { func rewriteValuegeneric_OpLsh32x16(v *Value) bool { b := v.Block _ 
= b - // match: (Lsh32x16 x (Const16 [c])) + // match: (Lsh32x16 x (Const16 [c])) // cond: // result: (Lsh32x64 x (Const64 [int64(uint16(c))])) for { @@ -10104,7 +7206,7 @@ func rewriteValuegeneric_OpLsh32x16(v *Value) bool { v.AddArg(v0) return true } - // match: (Lsh32x16 (Const32 [0]) _) + // match: (Lsh32x16 (Const32 [0]) _) // cond: // result: (Const32 [0]) for { @@ -10124,7 +7226,7 @@ func rewriteValuegeneric_OpLsh32x16(v *Value) bool { func rewriteValuegeneric_OpLsh32x32(v *Value) bool { b := v.Block _ = b - // match: (Lsh32x32 x (Const32 [c])) + // match: (Lsh32x32 x (Const32 [c])) // cond: // result: (Lsh32x64 x (Const64 [int64(uint32(c))])) for { @@ -10142,7 +7244,7 @@ func rewriteValuegeneric_OpLsh32x32(v *Value) bool { v.AddArg(v0) return true } - // match: (Lsh32x32 (Const32 [0]) _) + // match: (Lsh32x32 (Const32 [0]) _) // cond: // result: (Const32 [0]) for { @@ -10164,7 +7266,7 @@ func rewriteValuegeneric_OpLsh32x64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh32x64 (Const32 [c]) (Const64 [d])) + // match: (Lsh32x64 (Const32 [c]) (Const64 [d])) // cond: // result: (Const32 [int64(int32(c) << uint64(d))]) for { @@ -10182,7 +7284,7 @@ func rewriteValuegeneric_OpLsh32x64(v *Value) bool { v.AuxInt = int64(int32(c) << uint64(d)) return true } - // match: (Lsh32x64 x (Const64 [0])) + // match: (Lsh32x64 x (Const64 [0])) // cond: // result: x for { @@ -10199,7 +7301,7 @@ func rewriteValuegeneric_OpLsh32x64(v *Value) bool { v.AddArg(x) return true } - // match: (Lsh32x64 (Const32 [0]) _) + // match: (Lsh32x64 (Const32 [0]) _) // cond: // result: (Const32 [0]) for { @@ -10214,7 +7316,7 @@ func rewriteValuegeneric_OpLsh32x64(v *Value) bool { v.AuxInt = 0 return true } - // match: (Lsh32x64 _ (Const64 [c])) + // match: (Lsh32x64 _ (Const64 [c])) // cond: uint64(c) >= 32 // result: (Const32 [0]) for { @@ -10303,7 +7405,7 @@ func rewriteValuegeneric_OpLsh32x64(v *Value) bool { func rewriteValuegeneric_OpLsh32x8(v *Value) bool { b := v.Block _ = b - // match: (Lsh32x8 x (Const8 [c])) + // match: (Lsh32x8 x (Const8 [c])) // cond: // result: (Lsh32x64 x (Const64 [int64(uint8(c))])) for { @@ -10321,7 +7423,7 @@ func rewriteValuegeneric_OpLsh32x8(v *Value) bool { v.AddArg(v0) return true } - // match: (Lsh32x8 (Const32 [0]) _) + // match: (Lsh32x8 (Const32 [0]) _) // cond: // result: (Const32 [0]) for { @@ -10341,7 +7443,7 @@ func rewriteValuegeneric_OpLsh32x8(v *Value) bool { func rewriteValuegeneric_OpLsh64x16(v *Value) bool { b := v.Block _ = b - // match: (Lsh64x16 x (Const16 [c])) + // match: (Lsh64x16 x (Const16 [c])) // cond: // result: (Lsh64x64 x (Const64 [int64(uint16(c))])) for { @@ -10359,7 +7461,7 @@ func rewriteValuegeneric_OpLsh64x16(v *Value) bool { v.AddArg(v0) return true } - // match: (Lsh64x16 (Const64 [0]) _) + // match: (Lsh64x16 (Const64 [0]) _) // cond: // result: (Const64 [0]) for { @@ -10379,7 +7481,7 @@ func rewriteValuegeneric_OpLsh64x16(v *Value) bool { func rewriteValuegeneric_OpLsh64x32(v *Value) bool { b := v.Block _ = b - // match: (Lsh64x32 x (Const32 [c])) + // match: (Lsh64x32 x (Const32 [c])) // cond: // result: (Lsh64x64 x (Const64 [int64(uint32(c))])) for { @@ -10397,7 +7499,7 @@ func rewriteValuegeneric_OpLsh64x32(v *Value) bool { v.AddArg(v0) return true } - // match: (Lsh64x32 (Const64 [0]) _) + // match: (Lsh64x32 (Const64 [0]) _) // cond: // result: (Const64 [0]) for { @@ -10419,7 +7521,7 @@ func rewriteValuegeneric_OpLsh64x64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh64x64 
(Const64 [c]) (Const64 [d])) + // match: (Lsh64x64 (Const64 [c]) (Const64 [d])) // cond: // result: (Const64 [c << uint64(d)]) for { @@ -10437,7 +7539,7 @@ func rewriteValuegeneric_OpLsh64x64(v *Value) bool { v.AuxInt = c << uint64(d) return true } - // match: (Lsh64x64 x (Const64 [0])) + // match: (Lsh64x64 x (Const64 [0])) // cond: // result: x for { @@ -10454,7 +7556,7 @@ func rewriteValuegeneric_OpLsh64x64(v *Value) bool { v.AddArg(x) return true } - // match: (Lsh64x64 (Const64 [0]) _) + // match: (Lsh64x64 (Const64 [0]) _) // cond: // result: (Const64 [0]) for { @@ -10469,7 +7571,7 @@ func rewriteValuegeneric_OpLsh64x64(v *Value) bool { v.AuxInt = 0 return true } - // match: (Lsh64x64 _ (Const64 [c])) + // match: (Lsh64x64 _ (Const64 [c])) // cond: uint64(c) >= 64 // result: (Const64 [0]) for { @@ -10558,7 +7660,7 @@ func rewriteValuegeneric_OpLsh64x64(v *Value) bool { func rewriteValuegeneric_OpLsh64x8(v *Value) bool { b := v.Block _ = b - // match: (Lsh64x8 x (Const8 [c])) + // match: (Lsh64x8 x (Const8 [c])) // cond: // result: (Lsh64x64 x (Const64 [int64(uint8(c))])) for { @@ -10576,7 +7678,7 @@ func rewriteValuegeneric_OpLsh64x8(v *Value) bool { v.AddArg(v0) return true } - // match: (Lsh64x8 (Const64 [0]) _) + // match: (Lsh64x8 (Const64 [0]) _) // cond: // result: (Const64 [0]) for { @@ -10596,7 +7698,7 @@ func rewriteValuegeneric_OpLsh64x8(v *Value) bool { func rewriteValuegeneric_OpLsh8x16(v *Value) bool { b := v.Block _ = b - // match: (Lsh8x16 x (Const16 [c])) + // match: (Lsh8x16 x (Const16 [c])) // cond: // result: (Lsh8x64 x (Const64 [int64(uint16(c))])) for { @@ -10614,7 +7716,7 @@ func rewriteValuegeneric_OpLsh8x16(v *Value) bool { v.AddArg(v0) return true } - // match: (Lsh8x16 (Const8 [0]) _) + // match: (Lsh8x16 (Const8 [0]) _) // cond: // result: (Const8 [0]) for { @@ -10634,7 +7736,7 @@ func rewriteValuegeneric_OpLsh8x16(v *Value) bool { func rewriteValuegeneric_OpLsh8x32(v *Value) bool { b := v.Block _ = b - // match: (Lsh8x32 x (Const32 [c])) + // match: (Lsh8x32 x (Const32 [c])) // cond: // result: (Lsh8x64 x (Const64 [int64(uint32(c))])) for { @@ -10652,7 +7754,7 @@ func rewriteValuegeneric_OpLsh8x32(v *Value) bool { v.AddArg(v0) return true } - // match: (Lsh8x32 (Const8 [0]) _) + // match: (Lsh8x32 (Const8 [0]) _) // cond: // result: (Const8 [0]) for { @@ -10674,7 +7776,7 @@ func rewriteValuegeneric_OpLsh8x64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Lsh8x64 (Const8 [c]) (Const64 [d])) + // match: (Lsh8x64 (Const8 [c]) (Const64 [d])) // cond: // result: (Const8 [int64(int8(c) << uint64(d))]) for { @@ -10692,7 +7794,7 @@ func rewriteValuegeneric_OpLsh8x64(v *Value) bool { v.AuxInt = int64(int8(c) << uint64(d)) return true } - // match: (Lsh8x64 x (Const64 [0])) + // match: (Lsh8x64 x (Const64 [0])) // cond: // result: x for { @@ -10709,7 +7811,7 @@ func rewriteValuegeneric_OpLsh8x64(v *Value) bool { v.AddArg(x) return true } - // match: (Lsh8x64 (Const8 [0]) _) + // match: (Lsh8x64 (Const8 [0]) _) // cond: // result: (Const8 [0]) for { @@ -10724,7 +7826,7 @@ func rewriteValuegeneric_OpLsh8x64(v *Value) bool { v.AuxInt = 0 return true } - // match: (Lsh8x64 _ (Const64 [c])) + // match: (Lsh8x64 _ (Const64 [c])) // cond: uint64(c) >= 8 // result: (Const8 [0]) for { @@ -10740,7 +7842,7 @@ func rewriteValuegeneric_OpLsh8x64(v *Value) bool { v.AuxInt = 0 return true } - // match: (Lsh8x64 (Lsh8x64 x (Const64 [c])) (Const64 [d])) + // match: (Lsh8x64 (Lsh8x64 x (Const64 [c])) (Const64 [d])) // cond: !uaddOvf(c,d) // result: 
(Lsh8x64 x (Const64 [c+d])) for { @@ -10813,7 +7915,7 @@ func rewriteValuegeneric_OpLsh8x64(v *Value) bool { func rewriteValuegeneric_OpLsh8x8(v *Value) bool { b := v.Block _ = b - // match: (Lsh8x8 x (Const8 [c])) + // match: (Lsh8x8 x (Const8 [c])) // cond: // result: (Lsh8x64 x (Const64 [int64(uint8(c))])) for { @@ -10831,7 +7933,7 @@ func rewriteValuegeneric_OpLsh8x8(v *Value) bool { v.AddArg(v0) return true } - // match: (Lsh8x8 (Const8 [0]) _) + // match: (Lsh8x8 (Const8 [0]) _) // cond: // result: (Const8 [0]) for { @@ -10894,7 +7996,7 @@ func rewriteValuegeneric_OpMod16(v *Value) bool { v.AddArg(v0) return true } - // match: (Mod16 x (Const16 [c])) + // match: (Mod16 x (Const16 [c])) // cond: x.Op != OpConst16 && (c > 0 || c == -1<<15) // result: (Sub16 x (Mul16 (Div16 x (Const16 [c])) (Const16 [c]))) for { @@ -11047,7 +8149,7 @@ func rewriteValuegeneric_OpMod32(v *Value) bool { v.AddArg(v0) return true } - // match: (Mod32 x (Const32 [c])) + // match: (Mod32 x (Const32 [c])) // cond: x.Op != OpConst32 && (c > 0 || c == -1<<31) // result: (Sub32 x (Mul32 (Div32 x (Const32 [c])) (Const32 [c]))) for { @@ -11200,7 +8302,7 @@ func rewriteValuegeneric_OpMod64(v *Value) bool { v.AddArg(v0) return true } - // match: (Mod64 x (Const64 [c])) + // match: (Mod64 x (Const64 [c])) // cond: x.Op != OpConst64 && (c > 0 || c == -1<<63) // result: (Sub64 x (Mul64 (Div64 x (Const64 [c])) (Const64 [c]))) for { @@ -11310,7 +8412,7 @@ func rewriteValuegeneric_OpMod64u(v *Value) bool { func rewriteValuegeneric_OpMod8(v *Value) bool { b := v.Block _ = b - // match: (Mod8 (Const8 [c]) (Const8 [d])) + // match: (Mod8 (Const8 [c]) (Const8 [d])) // cond: d != 0 // result: (Const8 [int64(int8(c % d))]) for { @@ -11331,7 +8433,7 @@ func rewriteValuegeneric_OpMod8(v *Value) bool { v.AuxInt = int64(int8(c % d)) return true } - // match: (Mod8 n (Const8 [c])) + // match: (Mod8 n (Const8 [c])) // cond: c < 0 && c != -1<<7 // result: (Mod8 n (Const8 [-c])) for { @@ -11353,7 +8455,7 @@ func rewriteValuegeneric_OpMod8(v *Value) bool { v.AddArg(v0) return true } - // match: (Mod8 x (Const8 [c])) + // match: (Mod8 x (Const8 [c])) // cond: x.Op != OpConst8 && (c > 0 || c == -1<<7) // result: (Sub8 x (Mul8 (Div8 x (Const8 [c])) (Const8 [c]))) for { @@ -11387,7 +8489,7 @@ func rewriteValuegeneric_OpMod8(v *Value) bool { func rewriteValuegeneric_OpMod8u(v *Value) bool { b := v.Block _ = b - // match: (Mod8u (Const8 [c]) (Const8 [d])) + // match: (Mod8u (Const8 [c]) (Const8 [d])) // cond: d != 0 // result: (Const8 [int64(uint8(c) % uint8(d))]) for { @@ -11408,7 +8510,7 @@ func rewriteValuegeneric_OpMod8u(v *Value) bool { v.AuxInt = int64(uint8(c) % uint8(d)) return true } - // match: (Mod8u n (Const8 [c])) + // match: (Mod8u n (Const8 [c])) // cond: isPowerOfTwo(c&0xff) // result: (And8 n (Const8 [(c&0xff)-1])) for { @@ -11429,7 +8531,7 @@ func rewriteValuegeneric_OpMod8u(v *Value) bool { v.AddArg(v0) return true } - // match: (Mod8u x (Const8 [c])) + // match: (Mod8u x (Const8 [c])) // cond: x.Op != OpConst8 && c > 0 && umagicOK(8 ,c) // result: (Sub8 x (Mul8 (Div8u x (Const8 [c])) (Const8 [c]))) for { @@ -11465,7 +8567,7 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Mul16 (Const16 [c]) (Const16 [d])) + // match: (Mul16 (Const16 [c]) (Const16 [d])) // cond: // result: (Const16 [int64(int16(c*d))]) for { @@ -11483,24 +8585,6 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { v.AuxInt = int64(int16(c * d)) return true } - // match: (Mul16 (Const16 [d]) 
(Const16 [c])) - // cond: - // result: (Const16 [int64(int16(c*d))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - c := v_1.AuxInt - v.reset(OpConst16) - v.AuxInt = int64(int16(c * d)) - return true - } // match: (Mul16 (Const16 [1]) x) // cond: // result: x @@ -11518,23 +8602,6 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { v.AddArg(x) return true } - // match: (Mul16 x (Const16 [1])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.AuxInt != 1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (Mul16 (Const16 [-1]) x) // cond: // result: (Neg16 x) @@ -11551,22 +8618,6 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { v.AddArg(x) return true } - // match: (Mul16 x (Const16 [-1])) - // cond: - // result: (Neg16 x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.AuxInt != -1 { - break - } - v.reset(OpNeg16) - v.AddArg(x) - return true - } // match: (Mul16 n (Const16 [c])) // cond: isPowerOfTwo(c) // result: (Lsh16x64 n (Const64 [log2(c)])) @@ -11589,28 +8640,6 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { v.AddArg(v0) return true } - // match: (Mul16 (Const16 [c]) n) - // cond: isPowerOfTwo(c) - // result: (Lsh16x64 n (Const64 [log2(c)])) - for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - c := v_0.AuxInt - n := v.Args[1] - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpLsh16x64) - v.Type = t - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64) - v0.AuxInt = log2(c) - v.AddArg(v0) - return true - } // match: (Mul16 n (Const16 [c])) // cond: t.IsSigned() && isPowerOfTwo(-c) // result: (Neg16 (Lsh16x64 n (Const64 [log2(-c)]))) @@ -11634,27 +8663,25 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { v.AddArg(v0) return true } - // match: (Mul16 (Const16 [c]) n) - // cond: t.IsSigned() && isPowerOfTwo(-c) - // result: (Neg16 (Lsh16x64 n (Const64 [log2(-c)]))) + // match: (Mul16 x (Const16 [c])) + // cond: x.Op != OpConst16 + // result: (Mul16 (Const16 [c]) x) for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != OpConst16 { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst16 { break } - c := v_0.AuxInt - n := v.Args[1] - if !(t.IsSigned() && isPowerOfTwo(-c)) { + t := v_1.Type + c := v_1.AuxInt + if !(x.Op != OpConst16) { break } - v.reset(OpNeg16) - v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, types.UInt64) - v1.AuxInt = log2(-c) - v0.AddArg(v1) + v.reset(OpMul16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = c v.AddArg(v0) + v.AddArg(x) return true } // match: (Mul16 (Const16 [0]) _) @@ -11672,20 +8699,22 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { v.AuxInt = 0 return true } - // match: (Mul16 _ (Const16 [0])) - // cond: - // result: (Const16 [0]) + // match: (Mul16 x l:(Mul16 _ _)) + // cond: (x.Op != OpMul16 && x.Op != OpConst16) + // result: (Mul16 l x) for { - v_1 := v.Args[1] - if v_1.Op != OpConst16 { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpMul16 { break } - if v_1.AuxInt != 0 { + if !(x.Op != OpMul16 && x.Op != OpConst16) { break } - v.reset(OpConst16) - v.AuxInt = 0 - return true + v.reset(OpMul16) + v.AddArg(l) + v.AddArg(x) + return true } // match: (Mul16 (Const16 [c]) (Mul16 (Const16 [d]) x)) // cond: @@ -11717,96 +8746,6 @@ func rewriteValuegeneric_OpMul16(v *Value) bool { v.AddArg(x) 
return true } - // match: (Mul16 (Const16 [c]) (Mul16 x (Const16 [d]))) - // cond: - // result: (Mul16 (Const16 [int64(int16(c*d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpMul16 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - break - } - if v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpMul16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Mul16 (Mul16 (Const16 [d]) x) (Const16 [c])) - // cond: - // result: (Mul16 (Const16 [int64(int16(c*d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpMul16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Mul16 (Mul16 x (Const16 [d])) (Const16 [c])) - // cond: - // result: (Mul16 (Const16 [int64(int16(c*d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMul16 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpMul16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true - } return false } func rewriteValuegeneric_OpMul32(v *Value) bool { @@ -11814,7 +8753,7 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Mul32 (Const32 [c]) (Const32 [d])) + // match: (Mul32 (Const32 [c]) (Const32 [d])) // cond: // result: (Const32 [int64(int32(c*d))]) for { @@ -11832,24 +8771,6 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { v.AuxInt = int64(int32(c * d)) return true } - // match: (Mul32 (Const32 [d]) (Const32 [c])) - // cond: - // result: (Const32 [int64(int32(c*d))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - c := v_1.AuxInt - v.reset(OpConst32) - v.AuxInt = int64(int32(c * d)) - return true - } // match: (Mul32 (Const32 [1]) x) // cond: // result: x @@ -11867,23 +8788,6 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { v.AddArg(x) return true } - // match: (Mul32 x (Const32 [1])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - if v_1.AuxInt != 1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (Mul32 (Const32 [-1]) x) // cond: // result: (Neg32 x) @@ -11900,22 +8804,6 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { v.AddArg(x) return true } - // match: (Mul32 x (Const32 [-1])) - // cond: - // result: (Neg32 x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - if v_1.AuxInt != -1 { - break - } - v.reset(OpNeg32) - v.AddArg(x) - return true - } // match: (Mul32 n (Const32 [c])) // cond: isPowerOfTwo(c) // result: (Lsh32x64 n (Const64 [log2(c)])) @@ -11938,28 +8826,6 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { v.AddArg(v0) return true } - // match: (Mul32 (Const32 [c]) n) - // cond: isPowerOfTwo(c) - 
// result: (Lsh32x64 n (Const64 [log2(c)])) - for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - c := v_0.AuxInt - n := v.Args[1] - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpLsh32x64) - v.Type = t - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64) - v0.AuxInt = log2(c) - v.AddArg(v0) - return true - } // match: (Mul32 n (Const32 [c])) // cond: t.IsSigned() && isPowerOfTwo(-c) // result: (Neg32 (Lsh32x64 n (Const64 [log2(-c)]))) @@ -11983,27 +8849,25 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { v.AddArg(v0) return true } - // match: (Mul32 (Const32 [c]) n) - // cond: t.IsSigned() && isPowerOfTwo(-c) - // result: (Neg32 (Lsh32x64 n (Const64 [log2(-c)]))) + // match: (Mul32 x (Const32 [c])) + // cond: x.Op != OpConst32 + // result: (Mul32 (Const32 [c]) x) for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != OpConst32 { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst32 { break } - c := v_0.AuxInt - n := v.Args[1] - if !(t.IsSigned() && isPowerOfTwo(-c)) { + t := v_1.Type + c := v_1.AuxInt + if !(x.Op != OpConst32) { break } - v.reset(OpNeg32) - v0 := b.NewValue0(v.Pos, OpLsh32x64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, types.UInt64) - v1.AuxInt = log2(-c) - v0.AddArg(v1) + v.reset(OpMul32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = c v.AddArg(v0) + v.AddArg(x) return true } // match: (Mul32 (Const32 [c]) (Add32 (Const32 [d]) x)) @@ -12044,284 +8908,82 @@ func rewriteValuegeneric_OpMul32(v *Value) bool { v.AddArg(v1) return true } - // match: (Mul32 (Const32 [c]) (Add32 x (Const32 [d]))) + // match: (Mul32 (Const32 [0]) _) // cond: - // result: (Add32 (Const32 [int64(int32(c*d))]) (Mul32 (Const32 [c]) x)) + // result: (Const32 [0]) for { v_0 := v.Args[0] if v_0.Op != OpConst32 { break } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd32 { - break - } - if v_1.Type != t { + if v_0.AuxInt != 0 { break } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { + v.reset(OpConst32) + v.AuxInt = 0 + return true + } + // match: (Mul32 x l:(Mul32 _ _)) + // cond: (x.Op != OpMul32 && x.Op != OpConst32) + // result: (Mul32 l x) + for { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpMul32 { break } - if v_1_1.Type != t { + if !(x.Op != OpMul32 && x.Op != OpConst32) { break } - d := v_1_1.AuxInt - v.reset(OpAdd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c * d)) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpMul32, t) - v2 := b.NewValue0(v.Pos, OpConst32, t) - v2.AuxInt = c - v1.AddArg(v2) - v1.AddArg(x) - v.AddArg(v1) + v.reset(OpMul32) + v.AddArg(l) + v.AddArg(x) return true } - // match: (Mul32 (Add32 (Const32 [d]) x) (Const32 [c])) + // match: (Mul32 (Const32 [c]) (Mul32 (Const32 [d]) x)) // cond: - // result: (Add32 (Const32 [int64(int32(c*d))]) (Mul32 (Const32 [c]) x)) + // result: (Mul32 (Const32 [int64(int32(c*d))]) x) for { v_0 := v.Args[0] - if v_0.Op != OpAdd32 { + if v_0.Op != OpConst32 { break } t := v_0.Type - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - if v_0_0.Type != t { + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpMul32 { break } - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { break } - if v_1.Type != t { + if v_1_0.Type != t { break } - c := v_1.AuxInt - v.reset(OpAdd32) + d := v_1_0.AuxInt + x := v_1.Args[1] + v.reset(OpMul32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c * d)) v.AddArg(v0) 
- v1 := b.NewValue0(v.Pos, OpMul32, t) - v2 := b.NewValue0(v.Pos, OpConst32, t) - v2.AuxInt = c - v1.AddArg(v2) - v1.AddArg(x) - v.AddArg(v1) + v.AddArg(x) return true } - // match: (Mul32 (Add32 x (Const32 [d])) (Const32 [c])) + return false +} +func rewriteValuegeneric_OpMul32F(v *Value) bool { + // match: (Mul32F (Const32F [c]) (Const32F [d])) // cond: - // result: (Add32 (Const32 [int64(int32(c*d))]) (Mul32 (Const32 [c]) x)) + // result: (Const32F [f2i(float64(i2f32(c) * i2f32(d)))]) for { v_0 := v.Args[0] - if v_0.Op != OpAdd32 { - break - } - t := v_0.Type - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { + if v_0.Op != OpConst32F { break } - if v_0_1.Type != t { - break - } - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c * d)) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpMul32, t) - v2 := b.NewValue0(v.Pos, OpConst32, t) - v2.AuxInt = c - v1.AddArg(v2) - v1.AddArg(x) - v.AddArg(v1) - return true - } - // match: (Mul32 (Const32 [0]) _) - // cond: - // result: (Const32 [0]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - if v_0.AuxInt != 0 { - break - } - v.reset(OpConst32) - v.AuxInt = 0 - return true - } - // match: (Mul32 _ (Const32 [0])) - // cond: - // result: (Const32 [0]) - for { - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpConst32) - v.AuxInt = 0 - return true - } - // match: (Mul32 (Const32 [c]) (Mul32 (Const32 [d]) x)) - // cond: - // result: (Mul32 (Const32 [int64(int32(c*d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - if v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - x := v_1.Args[1] - v.reset(OpMul32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Mul32 (Const32 [c]) (Mul32 x (Const32 [d]))) - // cond: - // result: (Mul32 (Const32 [int64(int32(c*d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpMul32 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { - break - } - if v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpMul32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Mul32 (Mul32 (Const32 [d]) x) (Const32 [c])) - // cond: - // result: (Mul32 (Const32 [int64(int32(c*d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpMul32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Mul32 (Mul32 x (Const32 [d])) (Const32 [c])) - // cond: - // result: (Mul32 (Const32 [int64(int32(c*d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMul32 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := 
v.Args[1] - if v_1.Op != OpConst32 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpMul32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpMul32F(v *Value) bool { - // match: (Mul32F (Const32F [c]) (Const32F [d])) - // cond: - // result: (Const32F [f2i(float64(i2f32(c) * i2f32(d)))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32F { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32F { + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32F { break } d := v_1.AuxInt @@ -12329,24 +8991,6 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool { v.AuxInt = f2i(float64(i2f32(c) * i2f32(d))) return true } - // match: (Mul32F (Const32F [d]) (Const32F [c])) - // cond: - // result: (Const32F [f2i(float64(i2f32(c) * i2f32(d)))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32F { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32F { - break - } - c := v_1.AuxInt - v.reset(OpConst32F) - v.AuxInt = f2i(float64(i2f32(c) * i2f32(d))) - return true - } // match: (Mul32F x (Const32F [f2i(1)])) // cond: // result: x @@ -12420,7 +9064,7 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Mul64 (Const64 [c]) (Const64 [d])) + // match: (Mul64 (Const64 [c]) (Const64 [d])) // cond: // result: (Const64 [c*d]) for { @@ -12438,24 +9082,6 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { v.AuxInt = c * d return true } - // match: (Mul64 (Const64 [d]) (Const64 [c])) - // cond: - // result: (Const64 [c*d]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - v.reset(OpConst64) - v.AuxInt = c * d - return true - } // match: (Mul64 (Const64 [1]) x) // cond: // result: x @@ -12473,23 +9099,6 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { v.AddArg(x) return true } - // match: (Mul64 x (Const64 [1])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.AuxInt != 1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (Mul64 (Const64 [-1]) x) // cond: // result: (Neg64 x) @@ -12506,22 +9115,6 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { v.AddArg(x) return true } - // match: (Mul64 x (Const64 [-1])) - // cond: - // result: (Neg64 x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.AuxInt != -1 { - break - } - v.reset(OpNeg64) - v.AddArg(x) - return true - } // match: (Mul64 n (Const64 [c])) // cond: isPowerOfTwo(c) // result: (Lsh64x64 n (Const64 [log2(c)])) @@ -12544,40 +9137,18 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { v.AddArg(v0) return true } - // match: (Mul64 (Const64 [c]) n) - // cond: isPowerOfTwo(c) - // result: (Lsh64x64 n (Const64 [log2(c)])) + // match: (Mul64 n (Const64 [c])) + // cond: t.IsSigned() && isPowerOfTwo(-c) + // result: (Neg64 (Lsh64x64 n (Const64 [log2(-c)]))) for { t := v.Type - v_0 := v.Args[0] - if v_0.Op != OpConst64 { + n := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { break } - c := v_0.AuxInt - n := v.Args[1] - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpLsh64x64) - v.Type = t - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64) - v0.AuxInt = log2(c) - v.AddArg(v0) - return true - } - // match: (Mul64 n (Const64 [c])) - // 
cond: t.IsSigned() && isPowerOfTwo(-c) - // result: (Neg64 (Lsh64x64 n (Const64 [log2(-c)]))) - for { - t := v.Type - n := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(t.IsSigned() && isPowerOfTwo(-c)) { + c := v_1.AuxInt + if !(t.IsSigned() && isPowerOfTwo(-c)) { break } v.reset(OpNeg64) @@ -12589,27 +9160,25 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { v.AddArg(v0) return true } - // match: (Mul64 (Const64 [c]) n) - // cond: t.IsSigned() && isPowerOfTwo(-c) - // result: (Neg64 (Lsh64x64 n (Const64 [log2(-c)]))) + // match: (Mul64 x (Const64 [c])) + // cond: x.Op != OpConst64 + // result: (Mul64 (Const64 [c]) x) for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != OpConst64 { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { break } - c := v_0.AuxInt - n := v.Args[1] - if !(t.IsSigned() && isPowerOfTwo(-c)) { + t := v_1.Type + c := v_1.AuxInt + if !(x.Op != OpConst64) { break } - v.reset(OpNeg64) - v0 := b.NewValue0(v.Pos, OpLsh64x64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, types.UInt64) - v1.AuxInt = log2(-c) - v0.AddArg(v1) + v.reset(OpMul64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c v.AddArg(v0) + v.AddArg(x) return true } // match: (Mul64 (Const64 [c]) (Add64 (Const64 [d]) x)) @@ -12650,120 +9219,6 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { v.AddArg(v1) return true } - // match: (Mul64 (Const64 [c]) (Add64 x (Const64 [d]))) - // cond: - // result: (Add64 (Const64 [c*d]) (Mul64 (Const64 [c]) x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd64 { - break - } - if v_1.Type != t { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - if v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpAdd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c * d - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpMul64, t) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = c - v1.AddArg(v2) - v1.AddArg(x) - v.AddArg(v1) - return true - } - // match: (Mul64 (Add64 (Const64 [d]) x) (Const64 [c])) - // cond: - // result: (Add64 (Const64 [c*d]) (Mul64 (Const64 [c]) x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - t := v_0.Type - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - if v_0_0.Type != t { - break - } - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c * d - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpMul64, t) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = c - v1.AddArg(v2) - v1.AddArg(x) - v.AddArg(v1) - return true - } - // match: (Mul64 (Add64 x (Const64 [d])) (Const64 [c])) - // cond: - // result: (Add64 (Const64 [c*d]) (Mul64 (Const64 [c]) x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - t := v_0.Type - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - if v_0_1.Type != t { - break - } - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpAdd64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c * d - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpMul64, t) - v2 := b.NewValue0(v.Pos, OpConst64, t) - v2.AuxInt = c - v1.AddArg(v2) - v1.AddArg(x) - v.AddArg(v1) - return true - } // match: 
(Mul64 (Const64 [0]) _) // cond: // result: (Const64 [0]) @@ -12779,19 +9234,21 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { v.AuxInt = 0 return true } - // match: (Mul64 _ (Const64 [0])) - // cond: - // result: (Const64 [0]) + // match: (Mul64 x l:(Mul64 _ _)) + // cond: (x.Op != OpMul64 && x.Op != OpConst64) + // result: (Mul64 l x) for { - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpMul64 { break } - if v_1.AuxInt != 0 { + if !(x.Op != OpMul64 && x.Op != OpConst64) { break } - v.reset(OpConst64) - v.AuxInt = 0 + v.reset(OpMul64) + v.AddArg(l) + v.AddArg(x) return true } // match: (Mul64 (Const64 [c]) (Mul64 (Const64 [d]) x)) @@ -12824,211 +9281,103 @@ func rewriteValuegeneric_OpMul64(v *Value) bool { v.AddArg(x) return true } - // match: (Mul64 (Const64 [c]) (Mul64 x (Const64 [d]))) + return false +} +func rewriteValuegeneric_OpMul64F(v *Value) bool { + // match: (Mul64F (Const64F [c]) (Const64F [d])) // cond: - // result: (Mul64 (Const64 [c*d]) x) + // result: (Const64F [f2i(i2f(c) * i2f(d))]) for { v_0 := v.Args[0] - if v_0.Op != OpConst64 { + if v_0.Op != OpConst64F { break } - t := v_0.Type c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpMul64 { + if v_1.Op != OpConst64F { break } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { + d := v_1.AuxInt + v.reset(OpConst64F) + v.AuxInt = f2i(i2f(c) * i2f(d)) + return true + } + // match: (Mul64F x (Const64F [f2i(1)])) + // cond: + // result: x + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64F { break } - if v_1_1.Type != t { + if v_1.AuxInt != f2i(1) { break } - d := v_1_1.AuxInt - v.reset(OpMul64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c * d - v.AddArg(v0) + v.reset(OpCopy) + v.Type = x.Type v.AddArg(x) return true } - // match: (Mul64 (Mul64 (Const64 [d]) x) (Const64 [c])) + // match: (Mul64F (Const64F [f2i(1)]) x) // cond: - // result: (Mul64 (Const64 [c*d]) x) + // result: x for { v_0 := v.Args[0] - if v_0.Op != OpMul64 { + if v_0.Op != OpConst64F { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { + if v_0.AuxInt != f2i(1) { break } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] + x := v.Args[1] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (Mul64F x (Const64F [f2i(-1)])) + // cond: + // result: (Neg64F x) + for { + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpConst64F { break } - if v_1.Type != t { + if v_1.AuxInt != f2i(-1) { break } - c := v_1.AuxInt - v.reset(OpMul64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c * d - v.AddArg(v0) + v.reset(OpNeg64F) v.AddArg(x) return true } - // match: (Mul64 (Mul64 x (Const64 [d])) (Const64 [c])) + // match: (Mul64F (Const64F [f2i(-1)]) x) // cond: - // result: (Mul64 (Const64 [c*d]) x) + // result: (Neg64F x) for { v_0 := v.Args[0] - if v_0.Op != OpMul64 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_0.Op != OpConst64F { break } - if v_1.Type != t { + if v_0.AuxInt != f2i(-1) { break } - c := v_1.AuxInt - v.reset(OpMul64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c * d - v.AddArg(v0) + x := v.Args[1] + v.reset(OpNeg64F) v.AddArg(x) return true } return false } -func rewriteValuegeneric_OpMul64F(v *Value) bool { - // match: (Mul64F (Const64F [c]) (Const64F [d])) +func rewriteValuegeneric_OpMul8(v *Value) bool { + b := v.Block + _ = b 
+ types := &b.Func.Config.Types + _ = types + // match: (Mul8 (Const8 [c]) (Const8 [d])) // cond: - // result: (Const64F [f2i(i2f(c) * i2f(d))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64F { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64F { - break - } - d := v_1.AuxInt - v.reset(OpConst64F) - v.AuxInt = f2i(i2f(c) * i2f(d)) - return true - } - // match: (Mul64F (Const64F [d]) (Const64F [c])) - // cond: - // result: (Const64F [f2i(i2f(c) * i2f(d))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64F { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64F { - break - } - c := v_1.AuxInt - v.reset(OpConst64F) - v.AuxInt = f2i(i2f(c) * i2f(d)) - return true - } - // match: (Mul64F x (Const64F [f2i(1)])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64F { - break - } - if v_1.AuxInt != f2i(1) { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Mul64F (Const64F [f2i(1)]) x) - // cond: - // result: x - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64F { - break - } - if v_0.AuxInt != f2i(1) { - break - } - x := v.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Mul64F x (Const64F [f2i(-1)])) - // cond: - // result: (Neg64F x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64F { - break - } - if v_1.AuxInt != f2i(-1) { - break - } - v.reset(OpNeg64F) - v.AddArg(x) - return true - } - // match: (Mul64F (Const64F [f2i(-1)]) x) - // cond: - // result: (Neg64F x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64F { - break - } - if v_0.AuxInt != f2i(-1) { - break - } - x := v.Args[1] - v.reset(OpNeg64F) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpMul8(v *Value) bool { - b := v.Block - _ = b - types := &b.Func.Config.Types - _ = types - // match: (Mul8 (Const8 [c]) (Const8 [d])) - // cond: - // result: (Const8 [int64(int8(c*d))]) + // result: (Const8 [int64(int8(c*d))]) for { v_0 := v.Args[0] if v_0.Op != OpConst8 { @@ -13044,25 +9393,7 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { v.AuxInt = int64(int8(c * d)) return true } - // match: (Mul8 (Const8 [d]) (Const8 [c])) - // cond: - // result: (Const8 [int64(int8(c*d))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - c := v_1.AuxInt - v.reset(OpConst8) - v.AuxInt = int64(int8(c * d)) - return true - } - // match: (Mul8 (Const8 [1]) x) + // match: (Mul8 (Const8 [1]) x) // cond: // result: x for { @@ -13079,24 +9410,7 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { v.AddArg(x) return true } - // match: (Mul8 x (Const8 [1])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.AuxInt != 1 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Mul8 (Const8 [-1]) x) + // match: (Mul8 (Const8 [-1]) x) // cond: // result: (Neg8 x) for { @@ -13112,23 +9426,7 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { v.AddArg(x) return true } - // match: (Mul8 x (Const8 [-1])) - // cond: - // result: (Neg8 x) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.AuxInt != -1 { - break - } - v.reset(OpNeg8) - v.AddArg(x) - return true - } - // match: (Mul8 n (Const8 [c])) + // match: (Mul8 n (Const8 [c])) // cond: isPowerOfTwo(c) // result: (Lsh8x64 n (Const64 [log2(c)])) for { @@ 
-13150,29 +9448,7 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { v.AddArg(v0) return true } - // match: (Mul8 (Const8 [c]) n) - // cond: isPowerOfTwo(c) - // result: (Lsh8x64 n (Const64 [log2(c)])) - for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - c := v_0.AuxInt - n := v.Args[1] - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpLsh8x64) - v.Type = t - v.AddArg(n) - v0 := b.NewValue0(v.Pos, OpConst64, types.UInt64) - v0.AuxInt = log2(c) - v.AddArg(v0) - return true - } - // match: (Mul8 n (Const8 [c])) + // match: (Mul8 n (Const8 [c])) // cond: t.IsSigned() && isPowerOfTwo(-c) // result: (Neg8 (Lsh8x64 n (Const64 [log2(-c)]))) for { @@ -13195,30 +9471,28 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { v.AddArg(v0) return true } - // match: (Mul8 (Const8 [c]) n) - // cond: t.IsSigned() && isPowerOfTwo(-c) - // result: (Neg8 (Lsh8x64 n (Const64 [log2(-c)]))) + // match: (Mul8 x (Const8 [c])) + // cond: x.Op != OpConst8 + // result: (Mul8 (Const8 [c]) x) for { - t := v.Type - v_0 := v.Args[0] - if v_0.Op != OpConst8 { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst8 { break } - c := v_0.AuxInt - n := v.Args[1] - if !(t.IsSigned() && isPowerOfTwo(-c)) { + t := v_1.Type + c := v_1.AuxInt + if !(x.Op != OpConst8) { break } - v.reset(OpNeg8) - v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v0.AddArg(n) - v1 := b.NewValue0(v.Pos, OpConst64, types.UInt64) - v1.AuxInt = log2(-c) - v0.AddArg(v1) + v.reset(OpMul8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = c v.AddArg(v0) + v.AddArg(x) return true } - // match: (Mul8 (Const8 [0]) _) + // match: (Mul8 (Const8 [0]) _) // cond: // result: (Const8 [0]) for { @@ -13233,22 +9507,24 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { v.AuxInt = 0 return true } - // match: (Mul8 _ (Const8 [0])) - // cond: - // result: (Const8 [0]) + // match: (Mul8 x l:(Mul8 _ _)) + // cond: (x.Op != OpMul8 && x.Op != OpConst8) + // result: (Mul8 l x) for { - v_1 := v.Args[1] - if v_1.Op != OpConst8 { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpMul8 { break } - if v_1.AuxInt != 0 { + if !(x.Op != OpMul8 && x.Op != OpConst8) { break } - v.reset(OpConst8) - v.AuxInt = 0 + v.reset(OpMul8) + v.AddArg(l) + v.AddArg(x) return true } - // match: (Mul8 (Const8 [c]) (Mul8 (Const8 [d]) x)) + // match: (Mul8 (Const8 [c]) (Mul8 (Const8 [d]) x)) // cond: // result: (Mul8 (Const8 [int64(int8(c*d))]) x) for { @@ -13278,136 +9554,46 @@ func rewriteValuegeneric_OpMul8(v *Value) bool { v.AddArg(x) return true } - // match: (Mul8 (Const8 [c]) (Mul8 x (Const8 [d]))) + return false +} +func rewriteValuegeneric_OpNeg16(v *Value) bool { + // match: (Neg16 (Const16 [c])) // cond: - // result: (Mul8 (Const8 [int64(int8(c*d))]) x) + // result: (Const16 [int64(-int16(c))]) for { v_0 := v.Args[0] - if v_0.Op != OpConst8 { + if v_0.Op != OpConst16 { break } - t := v_0.Type c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpMul8 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 { - break - } - if v_1_1.Type != t { + v.reset(OpConst16) + v.AuxInt = int64(-int16(c)) + return true + } + // match: (Neg16 (Sub16 x y)) + // cond: + // result: (Sub16 y x) + for { + v_0 := v.Args[0] + if v_0.Op != OpSub16 { break } - d := v_1_1.AuxInt - v.reset(OpMul8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c * d)) - v.AddArg(v0) + x := v_0.Args[0] + y := v_0.Args[1] + v.reset(OpSub16) + v.AddArg(y) v.AddArg(x) return true } - // match: (Mul8 (Mul8 (Const8 [d]) x) (Const8 [c])) + return false +} +func 
rewriteValuegeneric_OpNeg32(v *Value) bool { + // match: (Neg32 (Const32 [c])) // cond: - // result: (Mul8 (Const8 [int64(int8(c*d))]) x) + // result: (Const32 [int64(-int32(c))]) for { v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpMul8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Mul8 (Mul8 x (Const8 [d])) (Const8 [c])) - // cond: - // result: (Mul8 (Const8 [int64(int8(c*d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpMul8 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpMul8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c * d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpNeg16(v *Value) bool { - // match: (Neg16 (Const16 [c])) - // cond: - // result: (Const16 [int64(-int16(c))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - c := v_0.AuxInt - v.reset(OpConst16) - v.AuxInt = int64(-int16(c)) - return true - } - // match: (Neg16 (Sub16 x y)) - // cond: - // result: (Sub16 y x) - for { - v_0 := v.Args[0] - if v_0.Op != OpSub16 { - break - } - x := v_0.Args[0] - y := v_0.Args[1] - v.reset(OpSub16) - v.AddArg(y) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpNeg32(v *Value) bool { - // match: (Neg32 (Const32 [c])) - // cond: - // result: (Const32 [int64(-int32(c))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { + if v_0.Op != OpConst32 { break } c := v_0.AuxInt @@ -13452,7 +9638,7 @@ func rewriteValuegeneric_OpNeg32F(v *Value) bool { return false } func rewriteValuegeneric_OpNeg64(v *Value) bool { - // match: (Neg64 (Const64 [c])) + // match: (Neg64 (Const64 [c])) // cond: // result: (Const64 [-c]) for { @@ -13502,7 +9688,7 @@ func rewriteValuegeneric_OpNeg64F(v *Value) bool { return false } func rewriteValuegeneric_OpNeg8(v *Value) bool { - // match: (Neg8 (Const8 [c])) + // match: (Neg8 (Const8 [c])) // cond: // result: (Const8 [int64( -int8(c))]) for { @@ -13515,7 +9701,7 @@ func rewriteValuegeneric_OpNeg8(v *Value) bool { v.AuxInt = int64(-int8(c)) return true } - // match: (Neg8 (Sub8 x y)) + // match: (Neg8 (Sub8 x y)) // cond: // result: (Sub8 y x) for { @@ -13577,92 +9763,23 @@ func rewriteValuegeneric_OpNeq16(v *Value) bool { v.AddArg(x) return true } - // match: (Neq16 (Const16 [c]) (Add16 x (Const16 [d]))) - // cond: - // result: (Neq16 (Const16 [int64(int16(c-d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd16 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - break - } - if v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpNeq16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Neq16 (Add16 (Const16 [d]) x) (Const16 [c])) - // cond: - // result: (Neq16 (Const16 [int64(int16(c-d))]) x) + // match: (Neq16 x (Const16 [c])) + // cond: x.Op != OpConst16 + // result: (Neq16 (Const16 [c]) x) for 
{ - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst16 { break } - if v_1.Type != t { - break - } + t := v_1.Type c := v_1.AuxInt - v.reset(OpNeq16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Neq16 (Add16 x (Const16 [d])) (Const16 [c])) - // cond: - // result: (Neq16 (Const16 [int64(int16(c-d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.Type != t { + if !(x.Op != OpConst16) { break } - c := v_1.AuxInt v.reset(OpNeq16) v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c - d)) + v0.AuxInt = c v.AddArg(v0) v.AddArg(x) return true @@ -13685,24 +9802,6 @@ func rewriteValuegeneric_OpNeq16(v *Value) bool { v.AuxInt = b2i(c != d) return true } - // match: (Neq16 (Const16 [d]) (Const16 [c])) - // cond: - // result: (ConstBool [b2i(c != d)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true - } return false } func rewriteValuegeneric_OpNeq32(v *Value) bool { @@ -13750,138 +9849,137 @@ func rewriteValuegeneric_OpNeq32(v *Value) bool { v.AddArg(x) return true } - // match: (Neq32 (Const32 [c]) (Add32 x (Const32 [d]))) - // cond: - // result: (Neq32 (Const32 [int64(int32(c-d))]) x) + // match: (Neq32 x (Const32 [c])) + // cond: x.Op != OpConst32 + // result: (Neq32 (Const32 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c := v_0.AuxInt + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpAdd32 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { + if v_1.Op != OpConst32 { break } - if v_1_1.Type != t { + t := v_1.Type + c := v_1.AuxInt + if !(x.Op != OpConst32) { break } - d := v_1_1.AuxInt v.reset(OpNeq32) v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c - d)) + v0.AuxInt = c v.AddArg(v0) v.AddArg(x) return true } - // match: (Neq32 (Add32 (Const32 [d]) x) (Const32 [c])) + // match: (Neq32 (Const32 [c]) (Const32 [d])) // cond: - // result: (Neq32 (Const32 [int64(int32(c-d))]) x) + // result: (ConstBool [b2i(c != d)]) for { v_0 := v.Args[0] - if v_0.Op != OpAdd32 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { + if v_0.Op != OpConst32 { break } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] + c := v_0.AuxInt v_1 := v.Args[1] if v_1.Op != OpConst32 { break } - if v_1.Type != t { + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c != d) + return true + } + return false +} +func rewriteValuegeneric_OpNeq64(v *Value) bool { + b := v.Block + _ = b + // match: (Neq64 x x) + // cond: + // result: (ConstBool [0]) + for { + x := v.Args[0] + if x != v.Args[1] { break } - c := v_1.AuxInt - v.reset(OpNeq32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c - d)) - v.AddArg(v0) - v.AddArg(x) + v.reset(OpConstBool) + v.AuxInt = 0 return true } - // match: (Neq32 (Add32 x (Const32 [d])) (Const32 [c])) + // match: (Neq64 (Const64 [c]) (Add64 (Const64 [d]) x)) // cond: - // result: (Neq32 (Const32 
[int64(int32(c-d))]) x) + // result: (Neq64 (Const64 [c-d]) x) for { v_0 := v.Args[0] - if v_0.Op != OpAdd32 { + if v_0.Op != OpConst64 { break } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpAdd64 { break } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst64 { break } - if v_1.Type != t { + if v_1_0.Type != t { break } - c := v_1.AuxInt - v.reset(OpNeq32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c - d)) + d := v_1_0.AuxInt + x := v_1.Args[1] + v.reset(OpNeq64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c - d v.AddArg(v0) v.AddArg(x) return true } - // match: (Neq32 (Const32 [c]) (Const32 [d])) - // cond: - // result: (ConstBool [b2i(c != d)]) + // match: (Neq64 x (Const64 [c])) + // cond: x.Op != OpConst64 + // result: (Neq64 (Const64 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { break } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { + t := v_1.Type + c := v_1.AuxInt + if !(x.Op != OpConst64) { break } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) + v.reset(OpNeq64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c + v.AddArg(v0) + v.AddArg(x) return true } - // match: (Neq32 (Const32 [d]) (Const32 [c])) + // match: (Neq64 (Const64 [c]) (Const64 [d])) // cond: // result: (ConstBool [b2i(c != d)]) for { v_0 := v.Args[0] - if v_0.Op != OpConst32 { + if v_0.Op != OpConst64 { break } - d := v_0.AuxInt + c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst32 { + if v_1.Op != OpConst64 { break } - c := v_1.AuxInt + d := v_1.AuxInt v.reset(OpConstBool) v.AuxInt = b2i(c != d) return true } return false } -func rewriteValuegeneric_OpNeq64(v *Value) bool { +func rewriteValuegeneric_OpNeq8(v *Value) bool { b := v.Block _ = b - // match: (Neq64 x x) + // match: (Neq8 x x) // cond: // result: (ConstBool [0]) for { @@ -13893,22 +9991,22 @@ func rewriteValuegeneric_OpNeq64(v *Value) bool { v.AuxInt = 0 return true } - // match: (Neq64 (Const64 [c]) (Add64 (Const64 [d]) x)) + // match: (Neq8 (Const8 [c]) (Add8 (Const8 [d]) x)) // cond: - // result: (Neq64 (Const64 [c-d]) x) + // result: (Neq8 (Const8 [int64(int8(c-d))]) x) for { v_0 := v.Args[0] - if v_0.Op != OpConst64 { + if v_0.Op != OpConst8 { break } t := v_0.Type c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpAdd64 { + if v_1.Op != OpAdd8 { break } v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 { + if v_1_0.Op != OpConst8 { break } if v_1_0.Type != t { @@ -13916,426 +10014,166 @@ func rewriteValuegeneric_OpNeq64(v *Value) bool { } d := v_1_0.AuxInt x := v_1.Args[1] - v.reset(OpNeq64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d + v.reset(OpNeq8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int64(int8(c - d)) v.AddArg(v0) v.AddArg(x) return true } - // match: (Neq64 (Const64 [c]) (Add64 x (Const64 [d]))) - // cond: - // result: (Neq64 (Const64 [c-d]) x) + // match: (Neq8 x (Const8 [c])) + // cond: x.Op != OpConst8 + // result: (Neq8 (Const8 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c := v_0.AuxInt + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpAdd64 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { + if v_1.Op != OpConst8 { break } - if v_1_1.Type != t { + t := v_1.Type + c := v_1.AuxInt + if !(x.Op != 
OpConst8) { break } - d := v_1_1.AuxInt - v.reset(OpNeq64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d + v.reset(OpNeq8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = c v.AddArg(v0) v.AddArg(x) return true } - // match: (Neq64 (Add64 (Const64 [d]) x) (Const64 [c])) + // match: (Neq8 (Const8 [c]) (Const8 [d])) // cond: - // result: (Neq64 (Const64 [c-d]) x) + // result: (ConstBool [b2i(c != d)]) for { v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { + if v_0.Op != OpConst8 { break } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] + c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.Type != t { + if v_1.Op != OpConst8 { break } - c := v_1.AuxInt - v.reset(OpNeq64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c != d) return true } - // match: (Neq64 (Add64 x (Const64 [d])) (Const64 [c])) + return false +} +func rewriteValuegeneric_OpNeqB(v *Value) bool { + // match: (NeqB (ConstBool [c]) (ConstBool [d])) // cond: - // result: (Neq64 (Const64 [c-d]) x) + // result: (ConstBool [b2i(c != d)]) for { v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { + if v_0.Op != OpConstBool { break } - t := v_0_1.Type - d := v_0_1.AuxInt + c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.Type != t { + if v_1.Op != OpConstBool { break } - c := v_1.AuxInt - v.reset(OpNeq64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c - d - v.AddArg(v0) - v.AddArg(x) + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(c != d) return true } - // match: (Neq64 (Const64 [c]) (Const64 [d])) + // match: (NeqB (ConstBool [0]) x) // cond: - // result: (ConstBool [b2i(c != d)]) + // result: x for { v_0 := v.Args[0] - if v_0.Op != OpConst64 { + if v_0.Op != OpConstBool { break } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_0.AuxInt != 0 { break } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) + x := v.Args[1] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } - // match: (Neq64 (Const64 [d]) (Const64 [c])) + // match: (NeqB (ConstBool [1]) x) // cond: - // result: (ConstBool [b2i(c != d)]) + // result: (Not x) for { v_0 := v.Args[0] - if v_0.Op != OpConst64 { + if v_0.Op != OpConstBool { break } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_0.AuxInt != 1 { break } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) + x := v.Args[1] + v.reset(OpNot) + v.AddArg(x) return true } return false } -func rewriteValuegeneric_OpNeq8(v *Value) bool { +func rewriteValuegeneric_OpNeqInter(v *Value) bool { b := v.Block _ = b - // match: (Neq8 x x) + types := &b.Func.Config.Types + _ = types + // match: (NeqInter x y) // cond: - // result: (ConstBool [0]) + // result: (NeqPtr (ITab x) (ITab y)) for { x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpConstBool) - v.AuxInt = 0 + y := v.Args[1] + v.reset(OpNeqPtr) + v0 := b.NewValue0(v.Pos, OpITab, types.BytePtr) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpITab, types.BytePtr) + v1.AddArg(y) + v.AddArg(v1) return true } - // match: (Neq8 (Const8 [c]) (Add8 (Const8 [d]) x)) +} +func rewriteValuegeneric_OpNeqPtr(v *Value) bool { + // match: (NeqPtr p (ConstNil)) // cond: - // result: (Neq8 (Const8 [int64(int8(c-d))]) x) + // result: (IsNonNil p) 
for { - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - c := v_0.AuxInt + p := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpAdd8 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 { - break - } - if v_1_0.Type != t { + if v_1.Op != OpConstNil { break } - d := v_1_0.AuxInt - x := v_1.Args[1] - v.reset(OpNeq8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) + v.reset(OpIsNonNil) + v.AddArg(p) return true } - // match: (Neq8 (Const8 [c]) (Add8 x (Const8 [d]))) + // match: (NeqPtr (ConstNil) p) // cond: - // result: (Neq8 (Const8 [int64(int8(c-d))]) x) + // result: (IsNonNil p) for { v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpAdd8 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 { - break - } - if v_1_1.Type != t { + if v_0.Op != OpConstNil { break } - d := v_1_1.AuxInt - v.reset(OpNeq8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) + p := v.Args[1] + v.reset(OpIsNonNil) + v.AddArg(p) return true } - // match: (Neq8 (Add8 (Const8 [d]) x) (Const8 [c])) + return false +} +func rewriteValuegeneric_OpNeqSlice(v *Value) bool { + b := v.Block + _ = b + types := &b.Func.Config.Types + _ = types + // match: (NeqSlice x y) // cond: - // result: (Neq8 (Const8 [int64(int8(c-d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpNeq8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Neq8 (Add8 x (Const8 [d])) (Const8 [c])) - // cond: - // result: (Neq8 (Const8 [int64(int8(c-d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpNeq8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c - d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Neq8 (Const8 [c]) (Const8 [d])) - // cond: - // result: (ConstBool [b2i(c != d)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true - } - // match: (Neq8 (Const8 [d]) (Const8 [c])) - // cond: - // result: (ConstBool [b2i(c != d)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - c := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true - } - return false -} -func rewriteValuegeneric_OpNeqB(v *Value) bool { - // match: (NeqB (ConstBool [c]) (ConstBool [d])) - // cond: - // result: (ConstBool [b2i(c != d)]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConstBool { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConstBool { - break - } - d := v_1.AuxInt - v.reset(OpConstBool) - v.AuxInt = b2i(c != d) - return true - } - // match: (NeqB (ConstBool [0]) x) - // cond: - // result: x - for { - v_0 := v.Args[0] - if 
v_0.Op != OpConstBool { - break - } - if v_0.AuxInt != 0 { - break - } - x := v.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (NeqB (ConstBool [1]) x) - // cond: - // result: (Not x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConstBool { - break - } - if v_0.AuxInt != 1 { - break - } - x := v.Args[1] - v.reset(OpNot) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpNeqInter(v *Value) bool { - b := v.Block - _ = b - types := &b.Func.Config.Types - _ = types - // match: (NeqInter x y) - // cond: - // result: (NeqPtr (ITab x) (ITab y)) - for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpNeqPtr) - v0 := b.NewValue0(v.Pos, OpITab, types.BytePtr) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpITab, types.BytePtr) - v1.AddArg(y) - v.AddArg(v1) - return true - } -} -func rewriteValuegeneric_OpNeqPtr(v *Value) bool { - // match: (NeqPtr p (ConstNil)) - // cond: - // result: (IsNonNil p) - for { - p := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConstNil { - break - } - v.reset(OpIsNonNil) - v.AddArg(p) - return true - } - // match: (NeqPtr (ConstNil) p) - // cond: - // result: (IsNonNil p) - for { - v_0 := v.Args[0] - if v_0.Op != OpConstNil { - break - } - p := v.Args[1] - v.reset(OpIsNonNil) - v.AddArg(p) - return true - } - return false -} -func rewriteValuegeneric_OpNeqSlice(v *Value) bool { - b := v.Block - _ = b - types := &b.Func.Config.Types - _ = types - // match: (NeqSlice x y) - // cond: - // result: (NeqPtr (SlicePtr x) (SlicePtr y)) + // result: (NeqPtr (SlicePtr x) (SlicePtr y)) for { x := v.Args[0] y := v.Args[1] @@ -14479,7 +10317,7 @@ func rewriteValuegeneric_OpNot(v *Value) bool { v.AddArg(y) return true } - // match: (Not (Eq8 x y)) + // match: (Not (Eq8 x y)) // cond: // result: (Neq8 x y) for { @@ -14494,7 +10332,7 @@ func rewriteValuegeneric_OpNot(v *Value) bool { v.AddArg(y) return true } - // match: (Not (EqB x y)) + // match: (Not (EqB x y)) // cond: // result: (NeqB x y) for { @@ -14554,7 +10392,7 @@ func rewriteValuegeneric_OpNot(v *Value) bool { v.AddArg(y) return true } - // match: (Not (Neq8 x y)) + // match: (Not (Neq8 x y)) // cond: // result: (Eq8 x y) for { @@ -14569,7 +10407,7 @@ func rewriteValuegeneric_OpNot(v *Value) bool { v.AddArg(y) return true } - // match: (Not (NeqB x y)) + // match: (Not (NeqB x y)) // cond: // result: (EqB x y) for { @@ -14629,7 +10467,7 @@ func rewriteValuegeneric_OpNot(v *Value) bool { v.AddArg(y) return true } - // match: (Not (Greater8 x y)) + // match: (Not (Greater8 x y)) // cond: // result: (Leq8 x y) for { @@ -14689,7 +10527,7 @@ func rewriteValuegeneric_OpNot(v *Value) bool { v.AddArg(y) return true } - // match: (Not (Greater8U x y)) + // match: (Not (Greater8U x y)) // cond: // result: (Leq8U x y) for { @@ -14749,7 +10587,7 @@ func rewriteValuegeneric_OpNot(v *Value) bool { v.AddArg(y) return true } - // match: (Not (Geq8 x y)) + // match: (Not (Geq8 x y)) // cond: // result: (Less8 x y) for { @@ -14809,7 +10647,7 @@ func rewriteValuegeneric_OpNot(v *Value) bool { v.AddArg(y) return true } - // match: (Not (Geq8U x y)) + // match: (Not (Geq8U x y)) // cond: // result: (Less8U x y) for { @@ -14869,7 +10707,7 @@ func rewriteValuegeneric_OpNot(v *Value) bool { v.AddArg(y) return true } - // match: (Not (Less8 x y)) + // match: (Not (Less8 x y)) // cond: // result: (Geq8 x y) for { @@ -14929,7 +10767,7 @@ func rewriteValuegeneric_OpNot(v *Value) bool { v.AddArg(y) return true } - // match: (Not (Less8U x y)) + // match: (Not (Less8U x y)) // 
cond: // result: (Geq8U x y) for { @@ -14989,7 +10827,7 @@ func rewriteValuegeneric_OpNot(v *Value) bool { v.AddArg(y) return true } - // match: (Not (Leq8 x y)) + // match: (Not (Leq8 x y)) // cond: // result: (Greater8 x y) for { @@ -15049,7 +10887,7 @@ func rewriteValuegeneric_OpNot(v *Value) bool { v.AddArg(y) return true } - // match: (Not (Leq8U x y)) + // match: (Not (Leq8U x y)) // cond: // result: (Greater8U x y) for { @@ -15104,7 +10942,7 @@ func rewriteValuegeneric_OpOffPtr(v *Value) bool { func rewriteValuegeneric_OpOr16(v *Value) bool { b := v.Block _ = b - // match: (Or16 (Const16 [c]) (Const16 [d])) + // match: (Or16 (Const16 [c]) (Const16 [d])) // cond: // result: (Const16 [int64(int16(c|d))]) for { @@ -15122,22 +10960,25 @@ func rewriteValuegeneric_OpOr16(v *Value) bool { v.AuxInt = int64(int16(c | d)) return true } - // match: (Or16 (Const16 [d]) (Const16 [c])) - // cond: - // result: (Const16 [int64(int16(c|d))]) + // match: (Or16 x (Const16 [c])) + // cond: x.Op != OpConst16 + // result: (Or16 (Const16 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - d := v_0.AuxInt + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst16 { break } + t := v_1.Type c := v_1.AuxInt - v.reset(OpConst16) - v.AuxInt = int64(int16(c | d)) + if !(x.Op != OpConst16) { + break + } + v.reset(OpOr16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = c + v.AddArg(v0) + v.AddArg(x) return true } // match: (Or16 x x) @@ -15170,805 +11011,211 @@ func rewriteValuegeneric_OpOr16(v *Value) bool { v.AddArg(x) return true } - // match: (Or16 x (Const16 [0])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (Or16 (Const16 [-1]) _) // cond: - // result: (Const16 [-1]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - if v_0.AuxInt != -1 { - break - } - v.reset(OpConst16) - v.AuxInt = -1 - return true - } - // match: (Or16 _ (Const16 [-1])) - // cond: - // result: (Const16 [-1]) - for { - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.AuxInt != -1 { - break - } - v.reset(OpConst16) - v.AuxInt = -1 - return true - } - // match: (Or16 x (Or16 x y)) - // cond: - // result: (Or16 x y) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr16 { - break - } - if x != v_1.Args[0] { - break - } - y := v_1.Args[1] - v.reset(OpOr16) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (Or16 x (Or16 y x)) - // cond: - // result: (Or16 x y) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr16 { - break - } - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpOr16) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (Or16 (Or16 x y) x) - // cond: - // result: (Or16 x y) - for { - v_0 := v.Args[0] - if v_0.Op != OpOr16 { - break - } - x := v_0.Args[0] - y := v_0.Args[1] - if x != v.Args[1] { - break - } - v.reset(OpOr16) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (Or16 (Or16 y x) x) - // cond: - // result: (Or16 x y) - for { - v_0 := v.Args[0] - if v_0.Op != OpOr16 { - break - } - y := v_0.Args[0] - x := v_0.Args[1] - if x != v.Args[1] { - break - } - v.reset(OpOr16) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (Or16 (Or16 i:(Const16 ) z) x) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Or16 i (Or16 z x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpOr16 { - break - } - i := v_0.Args[0] - if i.Op 
!= OpConst16 { - break - } - t := i.Type - z := v_0.Args[1] - x := v.Args[1] - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpOr16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or16 (Or16 z i:(Const16 )) x) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Or16 i (Or16 z x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpOr16 { - break - } - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst16 { - break - } - t := i.Type - x := v.Args[1] - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpOr16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or16 x (Or16 i:(Const16 ) z)) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Or16 i (Or16 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr16 { - break - } - i := v_1.Args[0] - if i.Op != OpConst16 { - break - } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpOr16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or16 x (Or16 z i:(Const16 ))) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Or16 i (Or16 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr16 { - break - } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpOr16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or16 (Const16 [c]) (Or16 (Const16 [d]) x)) - // cond: - // result: (Or16 (Const16 [int64(int16(c|d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpOr16 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 { - break - } - if v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - x := v_1.Args[1] - v.reset(OpOr16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c | d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or16 (Const16 [c]) (Or16 x (Const16 [d]))) - // cond: - // result: (Or16 (Const16 [int64(int16(c|d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpOr16 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { - break - } - if v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpOr16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c | d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or16 (Or16 (Const16 [d]) x) (Const16 [c])) - // cond: - // result: (Or16 (Const16 [int64(int16(c|d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpOr16 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpOr16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c | d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or16 (Or16 x (Const16 [d])) (Const16 [c])) - // cond: - // result: (Or16 (Const16 [int64(int16(c|d))]) x) - for { - v_0 := v.Args[0] - if 
v_0.Op != OpOr16 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpOr16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c | d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - return false -} -func rewriteValuegeneric_OpOr32(v *Value) bool { - b := v.Block - _ = b - // match: (Or32 (Const32 [c]) (Const32 [d])) - // cond: - // result: (Const32 [int64(int32(c|d))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - d := v_1.AuxInt - v.reset(OpConst32) - v.AuxInt = int64(int32(c | d)) - return true - } - // match: (Or32 (Const32 [d]) (Const32 [c])) - // cond: - // result: (Const32 [int64(int32(c|d))]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - c := v_1.AuxInt - v.reset(OpConst32) - v.AuxInt = int64(int32(c | d)) - return true - } - // match: (Or32 x x) - // cond: - // result: x - for { - x := v.Args[0] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Or32 (Const32 [0]) x) - // cond: - // result: x - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - if v_0.AuxInt != 0 { - break - } - x := v.Args[1] - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Or32 x (Const32 [0])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Or32 (Const32 [-1]) _) - // cond: - // result: (Const32 [-1]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - if v_0.AuxInt != -1 { - break - } - v.reset(OpConst32) - v.AuxInt = -1 - return true - } - // match: (Or32 _ (Const32 [-1])) - // cond: - // result: (Const32 [-1]) - for { - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - if v_1.AuxInt != -1 { - break - } - v.reset(OpConst32) - v.AuxInt = -1 - return true - } - // match: (Or32 x (Or32 x y)) - // cond: - // result: (Or32 x y) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr32 { - break - } - if x != v_1.Args[0] { - break - } - y := v_1.Args[1] - v.reset(OpOr32) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (Or32 x (Or32 y x)) - // cond: - // result: (Or32 x y) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr32 { - break - } - y := v_1.Args[0] - if x != v_1.Args[1] { - break - } - v.reset(OpOr32) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (Or32 (Or32 x y) x) - // cond: - // result: (Or32 x y) - for { - v_0 := v.Args[0] - if v_0.Op != OpOr32 { - break - } - x := v_0.Args[0] - y := v_0.Args[1] - if x != v.Args[1] { - break - } - v.reset(OpOr32) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (Or32 (Or32 y x) x) - // cond: - // result: (Or32 x y) - for { - v_0 := v.Args[0] - if v_0.Op != OpOr32 { - break - } - y := v_0.Args[0] - x := v_0.Args[1] - if x != v.Args[1] { - break - } - v.reset(OpOr32) - v.AddArg(x) - v.AddArg(y) - return true - } - // match: (Or32 (Or32 i:(Const32 ) z) x) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Or32 i (Or32 z x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpOr32 { - break - } - i := 
v_0.Args[0] - if i.Op != OpConst32 { - break - } - t := i.Type - z := v_0.Args[1] - x := v.Args[1] - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpOr32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or32 (Or32 z i:(Const32 )) x) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Or32 i (Or32 z x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpOr32 { - break - } - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst32 { + // result: (Const16 [-1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpConst16 { break } - t := i.Type - x := v.Args[1] - if !(z.Op != OpConst32 && x.Op != OpConst32) { + if v_0.AuxInt != -1 { break } - v.reset(OpOr32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpConst16) + v.AuxInt = -1 return true } - // match: (Or32 x (Or32 i:(Const32 ) z)) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Or32 i (Or32 z x)) + // match: (Or16 x (Or16 x y)) + // cond: + // result: (Or16 x y) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpOr32 { - break - } - i := v_1.Args[0] - if i.Op != OpConst32 { + if v_1.Op != OpOr16 { break } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst32 && x.Op != OpConst32) { + if x != v_1.Args[0] { break } - v.reset(OpOr32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + y := v_1.Args[1] + v.reset(OpOr16) + v.AddArg(x) + v.AddArg(y) return true } - // match: (Or32 x (Or32 z i:(Const32 ))) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Or32 i (Or32 z x)) + // match: (Or16 x (Or16 y x)) + // cond: + // result: (Or16 x y) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpOr32 { - break - } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst32 { + if v_1.Op != OpOr16 { break } - t := i.Type - if !(z.Op != OpConst32 && x.Op != OpConst32) { + y := v_1.Args[0] + if x != v_1.Args[1] { break } - v.reset(OpOr32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpOr16) + v.AddArg(x) + v.AddArg(y) return true } - // match: (Or32 (Const32 [c]) (Or32 (Const32 [d]) x)) + // match: (Or16 (Or16 x y) x) // cond: - // result: (Or32 (Const32 [int64(int32(c|d))]) x) + // result: (Or16 x y) for { v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpOr32 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { + if v_0.Op != OpOr16 { break } - if v_1_0.Type != t { + x := v_0.Args[0] + y := v_0.Args[1] + if x != v.Args[1] { break } - d := v_1_0.AuxInt - x := v_1.Args[1] - v.reset(OpOr32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c | d)) - v.AddArg(v0) + v.reset(OpOr16) v.AddArg(x) + v.AddArg(y) return true } - // match: (Or32 (Const32 [c]) (Or32 x (Const32 [d]))) + // match: (Or16 (Or16 x y) y) // cond: - // result: (Or32 (Const32 [int64(int32(c|d))]) x) + // result: (Or16 x y) for { v_0 := v.Args[0] - if v_0.Op != OpConst32 { + if v_0.Op != OpOr16 { break } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpOr32 { + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { break } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { + v.reset(OpOr16) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Or16 x l:(Or16 _ _)) + // cond: (x.Op != OpOr16 && x.Op != OpConst16) + // 
result: (Or16 l x) + for { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpOr16 { break } - if v_1_1.Type != t { + if !(x.Op != OpOr16 && x.Op != OpConst16) { break } - d := v_1_1.AuxInt - v.reset(OpOr32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c | d)) - v.AddArg(v0) + v.reset(OpOr16) + v.AddArg(l) v.AddArg(x) return true } - // match: (Or32 (Or32 (Const32 [d]) x) (Const32 [c])) - // cond: - // result: (Or32 (Const32 [int64(int32(c|d))]) x) + // match: (Or16 (Or16 i:(Const16 ) z) x) + // cond: (z.Op != OpConst16 && x.Op != OpConst16) + // result: (Or16 i (Or16 z x)) for { v_0 := v.Args[0] - if v_0.Op != OpOr32 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { + if v_0.Op != OpOr16 { break } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { + i := v_0.Args[0] + if i.Op != OpConst16 { break } - if v_1.Type != t { + t := i.Type + z := v_0.Args[1] + x := v.Args[1] + if !(z.Op != OpConst16 && x.Op != OpConst16) { break } - c := v_1.AuxInt - v.reset(OpOr32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c | d)) + v.reset(OpOr16) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpOr16, t) + v0.AddArg(z) + v0.AddArg(x) v.AddArg(v0) - v.AddArg(x) return true } - // match: (Or32 (Or32 x (Const32 [d])) (Const32 [c])) + // match: (Or16 (Const16 [c]) (Or16 (Const16 [d]) x)) // cond: - // result: (Or32 (Const32 [int64(int32(c|d))]) x) + // result: (Or16 (Const16 [int64(int16(c|d))]) x) for { v_0 := v.Args[0] - if v_0.Op != OpOr32 { + if v_0.Op != OpConst16 { break } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpOr16 { break } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst16 { break } - if v_1.Type != t { + if v_1_0.Type != t { break } - c := v_1.AuxInt - v.reset(OpOr32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c | d)) + d := v_1_0.AuxInt + x := v_1.Args[1] + v.reset(OpOr16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int64(int16(c | d)) v.AddArg(v0) v.AddArg(x) return true } return false } -func rewriteValuegeneric_OpOr64(v *Value) bool { +func rewriteValuegeneric_OpOr32(v *Value) bool { b := v.Block _ = b - // match: (Or64 (Const64 [c]) (Const64 [d])) + // match: (Or32 (Const32 [c]) (Const32 [d])) // cond: - // result: (Const64 [c|d]) + // result: (Const32 [int64(int32(c|d))]) for { v_0 := v.Args[0] - if v_0.Op != OpConst64 { + if v_0.Op != OpConst32 { break } c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpConst32 { break } d := v_1.AuxInt - v.reset(OpConst64) - v.AuxInt = c | d + v.reset(OpConst32) + v.AuxInt = int64(int32(c | d)) return true } - // match: (Or64 (Const64 [d]) (Const64 [c])) - // cond: - // result: (Const64 [c|d]) + // match: (Or32 x (Const32 [c])) + // cond: x.Op != OpConst32 + // result: (Or32 (Const32 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - d := v_0.AuxInt + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpConst32 { break } + t := v_1.Type c := v_1.AuxInt - v.reset(OpConst64) - v.AuxInt = c | d + if !(x.Op != OpConst32) { + break + } + v.reset(OpOr32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = c + v.AddArg(v0) + v.AddArg(x) return true } - // match: (Or64 x x) + // match: (Or32 x x) // cond: // result: x for { @@ -15981,12 +11228,12 @@ func 
rewriteValuegeneric_OpOr64(v *Value) bool { v.AddArg(x) return true } - // match: (Or64 (Const64 [0]) x) + // match: (Or32 (Const32 [0]) x) // cond: // result: x for { v_0 := v.Args[0] - if v_0.Op != OpConst64 { + if v_0.Op != OpConst32 { break } if v_0.AuxInt != 0 { @@ -15998,95 +11245,63 @@ func rewriteValuegeneric_OpOr64(v *Value) bool { v.AddArg(x) return true } - // match: (Or64 x (Const64 [0])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Or64 (Const64 [-1]) _) - // cond: - // result: (Const64 [-1]) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - if v_0.AuxInt != -1 { - break - } - v.reset(OpConst64) - v.AuxInt = -1 - return true - } - // match: (Or64 _ (Const64 [-1])) + // match: (Or32 (Const32 [-1]) _) // cond: - // result: (Const64 [-1]) + // result: (Const32 [-1]) for { - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + v_0 := v.Args[0] + if v_0.Op != OpConst32 { break } - if v_1.AuxInt != -1 { + if v_0.AuxInt != -1 { break } - v.reset(OpConst64) + v.reset(OpConst32) v.AuxInt = -1 return true } - // match: (Or64 x (Or64 x y)) + // match: (Or32 x (Or32 x y)) // cond: - // result: (Or64 x y) + // result: (Or32 x y) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpOr64 { + if v_1.Op != OpOr32 { break } if x != v_1.Args[0] { break } y := v_1.Args[1] - v.reset(OpOr64) + v.reset(OpOr32) v.AddArg(x) v.AddArg(y) return true } - // match: (Or64 x (Or64 y x)) + // match: (Or32 x (Or32 y x)) // cond: - // result: (Or64 x y) + // result: (Or32 x y) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpOr64 { + if v_1.Op != OpOr32 { break } y := v_1.Args[0] if x != v_1.Args[1] { break } - v.reset(OpOr64) + v.reset(OpOr32) v.AddArg(x) v.AddArg(y) return true } - // match: (Or64 (Or64 x y) x) + // match: (Or32 (Or32 x y) x) // cond: - // result: (Or64 x y) + // result: (Or32 x y) for { v_0 := v.Args[0] - if v_0.Op != OpOr64 { + if v_0.Op != OpOr32 { break } x := v_0.Args[0] @@ -16094,149 +11309,88 @@ func rewriteValuegeneric_OpOr64(v *Value) bool { if x != v.Args[1] { break } - v.reset(OpOr64) + v.reset(OpOr32) v.AddArg(x) v.AddArg(y) return true } - // match: (Or64 (Or64 y x) x) + // match: (Or32 (Or32 x y) y) // cond: - // result: (Or64 x y) + // result: (Or32 x y) for { v_0 := v.Args[0] - if v_0.Op != OpOr64 { + if v_0.Op != OpOr32 { break } - y := v_0.Args[0] - x := v_0.Args[1] - if x != v.Args[1] { + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { break } - v.reset(OpOr64) + v.reset(OpOr32) v.AddArg(x) v.AddArg(y) return true } - // match: (Or64 (Or64 i:(Const64 ) z) x) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Or64 i (Or64 z x)) + // match: (Or32 x l:(Or32 _ _)) + // cond: (x.Op != OpOr32 && x.Op != OpConst32) + // result: (Or32 l x) for { - v_0 := v.Args[0] - if v_0.Op != OpOr64 { - break - } - i := v_0.Args[0] - if i.Op != OpConst64 { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpOr32 { break } - t := i.Type - z := v_0.Args[1] - x := v.Args[1] - if !(z.Op != OpConst64 && x.Op != OpConst64) { + if !(x.Op != OpOr32 && x.Op != OpConst32) { break } - v.reset(OpOr64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpOr32) + v.AddArg(l) + v.AddArg(x) return true } - // match: (Or64 (Or64 z i:(Const64 )) x) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Or64 i (Or64 z x)) + // 
match: (Or32 (Or32 i:(Const32 ) z) x) + // cond: (z.Op != OpConst32 && x.Op != OpConst32) + // result: (Or32 i (Or32 z x)) for { v_0 := v.Args[0] - if v_0.Op != OpOr64 { + if v_0.Op != OpOr32 { break } - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst64 { + i := v_0.Args[0] + if i.Op != OpConst32 { break } t := i.Type + z := v_0.Args[1] x := v.Args[1] - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpOr64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or64 x (Or64 i:(Const64 ) z)) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Or64 i (Or64 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr64 { - break - } - i := v_1.Args[0] - if i.Op != OpConst64 { - break - } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpOr64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Or64 x (Or64 z i:(Const64 ))) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Or64 i (Or64 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr64 { - break - } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { + if !(z.Op != OpConst32 && x.Op != OpConst32) { break } - v.reset(OpOr64) + v.reset(OpOr32) v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr64, t) + v0 := b.NewValue0(v.Pos, OpOr32, t) v0.AddArg(z) v0.AddArg(x) v.AddArg(v0) return true } - // match: (Or64 (Const64 [c]) (Or64 (Const64 [d]) x)) + // match: (Or32 (Const32 [c]) (Or32 (Const32 [d]) x)) // cond: - // result: (Or64 (Const64 [c|d]) x) + // result: (Or32 (Const32 [int64(int32(c|d))]) x) for { v_0 := v.Args[0] - if v_0.Op != OpConst64 { + if v_0.Op != OpConst32 { break } t := v_0.Type c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpOr64 { + if v_1.Op != OpOr32 { break } v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 { + if v_1_0.Op != OpConst32 { break } if v_1_0.Type != t { @@ -16244,145 +11398,58 @@ func rewriteValuegeneric_OpOr64(v *Value) bool { } d := v_1_0.AuxInt x := v_1.Args[1] - v.reset(OpOr64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c | d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or64 (Const64 [c]) (Or64 x (Const64 [d]))) - // cond: - // result: (Or64 (Const64 [c|d]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpOr64 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { - break - } - if v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpOr64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c | d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or64 (Or64 (Const64 [d]) x) (Const64 [c])) - // cond: - // result: (Or64 (Const64 [c|d]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpOr64 { - break - } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpOr64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c | d - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Or64 (Or64 x (Const64 [d])) (Const64 [c])) - // cond: - // result: (Or64 (Const64 [c|d]) x) - for { - v_0 := v.Args[0] 
- if v_0.Op != OpOr64 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpOr64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c | d + v.reset(OpOr32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int64(int32(c | d)) v.AddArg(v0) v.AddArg(x) return true } return false } -func rewriteValuegeneric_OpOr8(v *Value) bool { +func rewriteValuegeneric_OpOr64(v *Value) bool { b := v.Block _ = b - // match: (Or8 (Const8 [c]) (Const8 [d])) + // match: (Or64 (Const64 [c]) (Const64 [d])) // cond: - // result: (Const8 [int64(int8(c|d))]) + // result: (Const64 [c|d]) for { v_0 := v.Args[0] - if v_0.Op != OpConst8 { + if v_0.Op != OpConst64 { break } c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpConst8 { + if v_1.Op != OpConst64 { break } d := v_1.AuxInt - v.reset(OpConst8) - v.AuxInt = int64(int8(c | d)) + v.reset(OpConst64) + v.AuxInt = c | d return true } - // match: (Or8 (Const8 [d]) (Const8 [c])) - // cond: - // result: (Const8 [int64(int8(c|d))]) + // match: (Or64 x (Const64 [c])) + // cond: x.Op != OpConst64 + // result: (Or64 (Const64 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst8 { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { break } - d := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { + t := v_1.Type + c := v_1.AuxInt + if !(x.Op != OpConst64) { break } - c := v_1.AuxInt - v.reset(OpConst8) - v.AuxInt = int64(int8(c | d)) + v.reset(OpOr64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c + v.AddArg(v0) + v.AddArg(x) return true } - // match: (Or8 x x) + // match: (Or64 x x) // cond: // result: x for { @@ -16395,12 +11462,12 @@ func rewriteValuegeneric_OpOr8(v *Value) bool { v.AddArg(x) return true } - // match: (Or8 (Const8 [0]) x) + // match: (Or64 (Const64 [0]) x) // cond: // result: x for { v_0 := v.Args[0] - if v_0.Op != OpConst8 { + if v_0.Op != OpConst64 { break } if v_0.AuxInt != 0 { @@ -16412,95 +11479,63 @@ func rewriteValuegeneric_OpOr8(v *Value) bool { v.AddArg(x) return true } - // match: (Or8 x (Const8 [0])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Or8 (Const8 [-1]) _) + // match: (Or64 (Const64 [-1]) _) // cond: - // result: (Const8 [-1]) + // result: (Const64 [-1]) for { v_0 := v.Args[0] - if v_0.Op != OpConst8 { + if v_0.Op != OpConst64 { break } if v_0.AuxInt != -1 { break } - v.reset(OpConst8) - v.AuxInt = -1 - return true - } - // match: (Or8 _ (Const8 [-1])) - // cond: - // result: (Const8 [-1]) - for { - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.AuxInt != -1 { - break - } - v.reset(OpConst8) + v.reset(OpConst64) v.AuxInt = -1 return true } - // match: (Or8 x (Or8 x y)) + // match: (Or64 x (Or64 x y)) // cond: - // result: (Or8 x y) + // result: (Or64 x y) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpOr8 { + if v_1.Op != OpOr64 { break } if x != v_1.Args[0] { break } y := v_1.Args[1] - v.reset(OpOr8) + v.reset(OpOr64) v.AddArg(x) v.AddArg(y) return true } - // match: (Or8 x (Or8 y x)) + // match: (Or64 x (Or64 y x)) // cond: - // result: (Or8 x y) + // result: (Or64 x y) for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpOr8 { + if v_1.Op != OpOr64 { break } y := v_1.Args[0] if x != v_1.Args[1] 
{ break } - v.reset(OpOr8) + v.reset(OpOr64) v.AddArg(x) v.AddArg(y) return true } - // match: (Or8 (Or8 x y) x) + // match: (Or64 (Or64 x y) x) // cond: - // result: (Or8 x y) + // result: (Or64 x y) for { v_0 := v.Args[0] - if v_0.Op != OpOr8 { + if v_0.Op != OpOr64 { break } x := v_0.Args[0] @@ -16508,246 +11543,329 @@ func rewriteValuegeneric_OpOr8(v *Value) bool { if x != v.Args[1] { break } - v.reset(OpOr8) + v.reset(OpOr64) v.AddArg(x) v.AddArg(y) return true } - // match: (Or8 (Or8 y x) x) + // match: (Or64 (Or64 x y) y) // cond: - // result: (Or8 x y) + // result: (Or64 x y) for { v_0 := v.Args[0] - if v_0.Op != OpOr8 { + if v_0.Op != OpOr64 { break } - y := v_0.Args[0] - x := v_0.Args[1] - if x != v.Args[1] { + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { break } - v.reset(OpOr8) + v.reset(OpOr64) v.AddArg(x) v.AddArg(y) return true } - // match: (Or8 (Or8 i:(Const8 ) z) x) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Or8 i (Or8 z x)) + // match: (Or64 x l:(Or64 _ _)) + // cond: (x.Op != OpOr64 && x.Op != OpConst64) + // result: (Or64 l x) + for { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpOr64 { + break + } + if !(x.Op != OpOr64 && x.Op != OpConst64) { + break + } + v.reset(OpOr64) + v.AddArg(l) + v.AddArg(x) + return true + } + // match: (Or64 (Or64 i:(Const64 ) z) x) + // cond: (z.Op != OpConst64 && x.Op != OpConst64) + // result: (Or64 i (Or64 z x)) for { v_0 := v.Args[0] - if v_0.Op != OpOr8 { + if v_0.Op != OpOr64 { break } i := v_0.Args[0] - if i.Op != OpConst8 { + if i.Op != OpConst64 { break } t := i.Type z := v_0.Args[1] x := v.Args[1] - if !(z.Op != OpConst8 && x.Op != OpConst8) { + if !(z.Op != OpConst64 && x.Op != OpConst64) { break } - v.reset(OpOr8) + v.reset(OpOr64) v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr8, t) + v0 := b.NewValue0(v.Pos, OpOr64, t) v0.AddArg(z) v0.AddArg(x) v.AddArg(v0) return true } - // match: (Or8 (Or8 z i:(Const8 )) x) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Or8 i (Or8 z x)) + // match: (Or64 (Const64 [c]) (Or64 (Const64 [d]) x)) + // cond: + // result: (Or64 (Const64 [c|d]) x) for { v_0 := v.Args[0] - if v_0.Op != OpOr8 { + if v_0.Op != OpConst64 { break } - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst8 { + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpOr64 { break } - t := i.Type - x := v.Args[1] - if !(z.Op != OpConst8 && x.Op != OpConst8) { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst64 { break } - v.reset(OpOr8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr8, t) - v0.AddArg(z) - v0.AddArg(x) + if v_1_0.Type != t { + break + } + d := v_1_0.AuxInt + x := v_1.Args[1] + v.reset(OpOr64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c | d v.AddArg(v0) + v.AddArg(x) return true } - // match: (Or8 x (Or8 i:(Const8 ) z)) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Or8 i (Or8 z x)) + return false +} +func rewriteValuegeneric_OpOr8(v *Value) bool { + b := v.Block + _ = b + // match: (Or8 (Const8 [c]) (Const8 [d])) + // cond: + // result: (Const8 [int64(int8(c|d))]) for { - x := v.Args[0] + v_0 := v.Args[0] + if v_0.Op != OpConst8 { + break + } + c := v_0.AuxInt v_1 := v.Args[1] - if v_1.Op != OpOr8 { + if v_1.Op != OpConst8 { break } - i := v_1.Args[0] - if i.Op != OpConst8 { + d := v_1.AuxInt + v.reset(OpConst8) + v.AuxInt = int64(int8(c | d)) + return true + } + // match: (Or8 x (Const8 [c])) + // cond: x.Op != OpConst8 + // result: (Or8 (Const8 [c]) x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != 
OpConst8 { break } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst8 && x.Op != OpConst8) { + t := v_1.Type + c := v_1.AuxInt + if !(x.Op != OpConst8) { break } v.reset(OpOr8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr8, t) - v0.AddArg(z) - v0.AddArg(x) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = c v.AddArg(v0) + v.AddArg(x) return true } - // match: (Or8 x (Or8 z i:(Const8 ))) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Or8 i (Or8 z x)) + // match: (Or8 x x) + // cond: + // result: x for { x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpOr8 { + if x != v.Args[1] { break } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst8 { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (Or8 (Const8 [0]) x) + // cond: + // result: x + for { + v_0 := v.Args[0] + if v_0.Op != OpConst8 { break } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { + if v_0.AuxInt != 0 { break } - v.reset(OpOr8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpOr8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + x := v.Args[1] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } - // match: (Or8 (Const8 [c]) (Or8 (Const8 [d]) x)) + // match: (Or8 (Const8 [-1]) _) // cond: - // result: (Or8 (Const8 [int64(int8(c|d))]) x) + // result: (Const8 [-1]) for { v_0 := v.Args[0] if v_0.Op != OpConst8 { break } - t := v_0.Type - c := v_0.AuxInt + if v_0.AuxInt != -1 { + break + } + v.reset(OpConst8) + v.AuxInt = -1 + return true + } + // match: (Or8 x (Or8 x y)) + // cond: + // result: (Or8 x y) + for { + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpOr8 { break } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst8 { + if x != v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpOr8) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Or8 x (Or8 y x)) + // cond: + // result: (Or8 x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpOr8 { + break + } + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpOr8) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Or8 (Or8 x y) x) + // cond: + // result: (Or8 x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpOr8 { break } - if v_1_0.Type != t { + x := v_0.Args[0] + y := v_0.Args[1] + if x != v.Args[1] { break } - d := v_1_0.AuxInt - x := v_1.Args[1] v.reset(OpOr8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c | d)) - v.AddArg(v0) v.AddArg(x) + v.AddArg(y) return true } - // match: (Or8 (Const8 [c]) (Or8 x (Const8 [d]))) + // match: (Or8 (Or8 x y) y) // cond: - // result: (Or8 (Const8 [int64(int8(c|d))]) x) + // result: (Or8 x y) for { v_0 := v.Args[0] - if v_0.Op != OpConst8 { + if v_0.Op != OpOr8 { break } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpOr8 { + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { break } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 { + v.reset(OpOr8) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (Or8 x l:(Or8 _ _)) + // cond: (x.Op != OpOr8 && x.Op != OpConst8) + // result: (Or8 l x) + for { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpOr8 { break } - if v_1_1.Type != t { + if !(x.Op != OpOr8 && x.Op != OpConst8) { break } - d := v_1_1.AuxInt v.reset(OpOr8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c | d)) - v.AddArg(v0) + v.AddArg(l) v.AddArg(x) return true } - // match: (Or8 (Or8 (Const8 [d]) x) (Const8 [c])) - // cond: - // result: (Or8 (Const8 [int64(int8(c|d))]) x) + // match: (Or8 (Or8 i:(Const8 ) z) x) + 
// cond: (z.Op != OpConst8 && x.Op != OpConst8) + // result: (Or8 i (Or8 z x)) for { v_0 := v.Args[0] if v_0.Op != OpOr8 { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { + i := v_0.Args[0] + if i.Op != OpConst8 { break } - if v_1.Type != t { + t := i.Type + z := v_0.Args[1] + x := v.Args[1] + if !(z.Op != OpConst8 && x.Op != OpConst8) { break } - c := v_1.AuxInt v.reset(OpOr8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c | d)) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpOr8, t) + v0.AddArg(z) + v0.AddArg(x) v.AddArg(v0) - v.AddArg(x) return true } - // match: (Or8 (Or8 x (Const8 [d])) (Const8 [c])) + // match: (Or8 (Const8 [c]) (Or8 (Const8 [d]) x)) // cond: // result: (Or8 (Const8 [int64(int8(c|d))]) x) for { v_0 := v.Args[0] - if v_0.Op != OpOr8 { + if v_0.Op != OpConst8 { break } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpOr8 { break } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst8 { break } - if v_1.Type != t { + if v_1_0.Type != t { break } - c := v_1.AuxInt + d := v_1_0.AuxInt + x := v_1.Args[1] v.reset(OpOr8) v0 := b.NewValue0(v.Pos, OpConst8, t) v0.AuxInt = int64(int8(c | d)) @@ -16758,7 +11876,7 @@ func rewriteValuegeneric_OpOr8(v *Value) bool { return false } func rewriteValuegeneric_OpPhi(v *Value) bool { - // match: (Phi (Const8 [c]) (Const8 [c])) + // match: (Phi (Const8 [c]) (Const8 [c])) // cond: // result: (Const8 [c]) for { @@ -17148,7 +12266,7 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) + // match: (Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) // cond: // result: (ZeroExt8to16 (Trunc16to8 x)) for { @@ -17182,7 +12300,7 @@ func rewriteValuegeneric_OpRsh16Ux64(v *Value) bool { func rewriteValuegeneric_OpRsh16Ux8(v *Value) bool { b := v.Block _ = b - // match: (Rsh16Ux8 x (Const8 [c])) + // match: (Rsh16Ux8 x (Const8 [c])) // cond: // result: (Rsh16Ux64 x (Const64 [int64(uint8(c))])) for { @@ -17220,7 +12338,7 @@ func rewriteValuegeneric_OpRsh16Ux8(v *Value) bool { func rewriteValuegeneric_OpRsh16x16(v *Value) bool { b := v.Block _ = b - // match: (Rsh16x16 x (Const16 [c])) + // match: (Rsh16x16 x (Const16 [c])) // cond: // result: (Rsh16x64 x (Const64 [int64(uint16(c))])) for { @@ -17238,7 +12356,7 @@ func rewriteValuegeneric_OpRsh16x16(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh16x16 (Const16 [0]) _) + // match: (Rsh16x16 (Const16 [0]) _) // cond: // result: (Const16 [0]) for { @@ -17258,7 +12376,7 @@ func rewriteValuegeneric_OpRsh16x16(v *Value) bool { func rewriteValuegeneric_OpRsh16x32(v *Value) bool { b := v.Block _ = b - // match: (Rsh16x32 x (Const32 [c])) + // match: (Rsh16x32 x (Const32 [c])) // cond: // result: (Rsh16x64 x (Const64 [int64(uint32(c))])) for { @@ -17276,7 +12394,7 @@ func rewriteValuegeneric_OpRsh16x32(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh16x32 (Const16 [0]) _) + // match: (Rsh16x32 (Const16 [0]) _) // cond: // result: (Const16 [0]) for { @@ -17298,7 +12416,7 @@ func rewriteValuegeneric_OpRsh16x64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh16x64 (Const16 [c]) (Const64 [d])) + // match: (Rsh16x64 (Const16 [c]) (Const64 [d])) // cond: // 
result: (Const16 [int64(int16(c) >> uint64(d))]) for { @@ -17316,7 +12434,7 @@ func rewriteValuegeneric_OpRsh16x64(v *Value) bool { v.AuxInt = int64(int16(c) >> uint64(d)) return true } - // match: (Rsh16x64 x (Const64 [0])) + // match: (Rsh16x64 x (Const64 [0])) // cond: // result: x for { @@ -17333,7 +12451,7 @@ func rewriteValuegeneric_OpRsh16x64(v *Value) bool { v.AddArg(x) return true } - // match: (Rsh16x64 (Const16 [0]) _) + // match: (Rsh16x64 (Const16 [0]) _) // cond: // result: (Const16 [0]) for { @@ -17378,7 +12496,7 @@ func rewriteValuegeneric_OpRsh16x64(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) + // match: (Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) // cond: // result: (SignExt8to16 (Trunc16to8 x)) for { @@ -17412,7 +12530,7 @@ func rewriteValuegeneric_OpRsh16x64(v *Value) bool { func rewriteValuegeneric_OpRsh16x8(v *Value) bool { b := v.Block _ = b - // match: (Rsh16x8 x (Const8 [c])) + // match: (Rsh16x8 x (Const8 [c])) // cond: // result: (Rsh16x64 x (Const64 [int64(uint8(c))])) for { @@ -17430,7 +12548,7 @@ func rewriteValuegeneric_OpRsh16x8(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh16x8 (Const16 [0]) _) + // match: (Rsh16x8 (Const16 [0]) _) // cond: // result: (Const16 [0]) for { @@ -17725,7 +12843,7 @@ func rewriteValuegeneric_OpRsh32Ux64(v *Value) bool { func rewriteValuegeneric_OpRsh32Ux8(v *Value) bool { b := v.Block _ = b - // match: (Rsh32Ux8 x (Const8 [c])) + // match: (Rsh32Ux8 x (Const8 [c])) // cond: // result: (Rsh32Ux64 x (Const64 [int64(uint8(c))])) for { @@ -17763,7 +12881,7 @@ func rewriteValuegeneric_OpRsh32Ux8(v *Value) bool { func rewriteValuegeneric_OpRsh32x16(v *Value) bool { b := v.Block _ = b - // match: (Rsh32x16 x (Const16 [c])) + // match: (Rsh32x16 x (Const16 [c])) // cond: // result: (Rsh32x64 x (Const64 [int64(uint16(c))])) for { @@ -17781,7 +12899,7 @@ func rewriteValuegeneric_OpRsh32x16(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh32x16 (Const32 [0]) _) + // match: (Rsh32x16 (Const32 [0]) _) // cond: // result: (Const32 [0]) for { @@ -17801,7 +12919,7 @@ func rewriteValuegeneric_OpRsh32x16(v *Value) bool { func rewriteValuegeneric_OpRsh32x32(v *Value) bool { b := v.Block _ = b - // match: (Rsh32x32 x (Const32 [c])) + // match: (Rsh32x32 x (Const32 [c])) // cond: // result: (Rsh32x64 x (Const64 [int64(uint32(c))])) for { @@ -17819,7 +12937,7 @@ func rewriteValuegeneric_OpRsh32x32(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh32x32 (Const32 [0]) _) + // match: (Rsh32x32 (Const32 [0]) _) // cond: // result: (Const32 [0]) for { @@ -17841,7 +12959,7 @@ func rewriteValuegeneric_OpRsh32x64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh32x64 (Const32 [c]) (Const64 [d])) + // match: (Rsh32x64 (Const32 [c]) (Const64 [d])) // cond: // result: (Const32 [int64(int32(c) >> uint64(d))]) for { @@ -17859,7 +12977,7 @@ func rewriteValuegeneric_OpRsh32x64(v *Value) bool { v.AuxInt = int64(int32(c) >> uint64(d)) return true } - // match: (Rsh32x64 x (Const64 [0])) + // match: (Rsh32x64 x (Const64 [0])) // cond: // result: x for { @@ -17876,7 +12994,7 @@ func rewriteValuegeneric_OpRsh32x64(v *Value) bool { v.AddArg(x) return true } - // match: (Rsh32x64 (Const32 [0]) _) + // match: (Rsh32x64 (Const32 [0]) _) // cond: // result: (Const32 [0]) for { @@ -17984,7 +13102,7 @@ func rewriteValuegeneric_OpRsh32x64(v *Value) bool { func rewriteValuegeneric_OpRsh32x8(v *Value) bool { b := v.Block _ = b - // match: 
(Rsh32x8 x (Const8 [c])) + // match: (Rsh32x8 x (Const8 [c])) // cond: // result: (Rsh32x64 x (Const64 [int64(uint8(c))])) for { @@ -18002,7 +13120,7 @@ func rewriteValuegeneric_OpRsh32x8(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh32x8 (Const32 [0]) _) + // match: (Rsh32x8 (Const32 [0]) _) // cond: // result: (Const32 [0]) for { @@ -18326,7 +13444,7 @@ func rewriteValuegeneric_OpRsh64Ux64(v *Value) bool { func rewriteValuegeneric_OpRsh64Ux8(v *Value) bool { b := v.Block _ = b - // match: (Rsh64Ux8 x (Const8 [c])) + // match: (Rsh64Ux8 x (Const8 [c])) // cond: // result: (Rsh64Ux64 x (Const64 [int64(uint8(c))])) for { @@ -18364,7 +13482,7 @@ func rewriteValuegeneric_OpRsh64Ux8(v *Value) bool { func rewriteValuegeneric_OpRsh64x16(v *Value) bool { b := v.Block _ = b - // match: (Rsh64x16 x (Const16 [c])) + // match: (Rsh64x16 x (Const16 [c])) // cond: // result: (Rsh64x64 x (Const64 [int64(uint16(c))])) for { @@ -18382,7 +13500,7 @@ func rewriteValuegeneric_OpRsh64x16(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh64x16 (Const64 [0]) _) + // match: (Rsh64x16 (Const64 [0]) _) // cond: // result: (Const64 [0]) for { @@ -18402,7 +13520,7 @@ func rewriteValuegeneric_OpRsh64x16(v *Value) bool { func rewriteValuegeneric_OpRsh64x32(v *Value) bool { b := v.Block _ = b - // match: (Rsh64x32 x (Const32 [c])) + // match: (Rsh64x32 x (Const32 [c])) // cond: // result: (Rsh64x64 x (Const64 [int64(uint32(c))])) for { @@ -18420,7 +13538,7 @@ func rewriteValuegeneric_OpRsh64x32(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh64x32 (Const64 [0]) _) + // match: (Rsh64x32 (Const64 [0]) _) // cond: // result: (Const64 [0]) for { @@ -18442,7 +13560,7 @@ func rewriteValuegeneric_OpRsh64x64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh64x64 (Const64 [c]) (Const64 [d])) + // match: (Rsh64x64 (Const64 [c]) (Const64 [d])) // cond: // result: (Const64 [c >> uint64(d)]) for { @@ -18460,7 +13578,7 @@ func rewriteValuegeneric_OpRsh64x64(v *Value) bool { v.AuxInt = c >> uint64(d) return true } - // match: (Rsh64x64 x (Const64 [0])) + // match: (Rsh64x64 x (Const64 [0])) // cond: // result: x for { @@ -18477,7 +13595,7 @@ func rewriteValuegeneric_OpRsh64x64(v *Value) bool { v.AddArg(x) return true } - // match: (Rsh64x64 (Const64 [0]) _) + // match: (Rsh64x64 (Const64 [0]) _) // cond: // result: (Const64 [0]) for { @@ -18614,7 +13732,7 @@ func rewriteValuegeneric_OpRsh64x64(v *Value) bool { func rewriteValuegeneric_OpRsh64x8(v *Value) bool { b := v.Block _ = b - // match: (Rsh64x8 x (Const8 [c])) + // match: (Rsh64x8 x (Const8 [c])) // cond: // result: (Rsh64x64 x (Const64 [int64(uint8(c))])) for { @@ -18632,7 +13750,7 @@ func rewriteValuegeneric_OpRsh64x8(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh64x8 (Const64 [0]) _) + // match: (Rsh64x8 (Const64 [0]) _) // cond: // result: (Const64 [0]) for { @@ -18670,7 +13788,7 @@ func rewriteValuegeneric_OpRsh8Ux16(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh8Ux16 (Const8 [0]) _) + // match: (Rsh8Ux16 (Const8 [0]) _) // cond: // result: (Const8 [0]) for { @@ -18708,7 +13826,7 @@ func rewriteValuegeneric_OpRsh8Ux32(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh8Ux32 (Const8 [0]) _) + // match: (Rsh8Ux32 (Const8 [0]) _) // cond: // result: (Const8 [0]) for { @@ -18730,7 +13848,7 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { _ = b types := &b.Func.Config.Types _ = types - // match: (Rsh8Ux64 (Const8 [c]) (Const64 [d])) + // match: (Rsh8Ux64 (Const8 [c]) 
(Const64 [d])) // cond: // result: (Const8 [int64(int8(uint8(c) >> uint64(d)))]) for { @@ -18748,7 +13866,7 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { v.AuxInt = int64(int8(uint8(c) >> uint64(d))) return true } - // match: (Rsh8Ux64 x (Const64 [0])) + // match: (Rsh8Ux64 x (Const64 [0])) // cond: // result: x for { @@ -18765,7 +13883,7 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { v.AddArg(x) return true } - // match: (Rsh8Ux64 (Const8 [0]) _) + // match: (Rsh8Ux64 (Const8 [0]) _) // cond: // result: (Const8 [0]) for { @@ -18780,7 +13898,7 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { v.AuxInt = 0 return true } - // match: (Rsh8Ux64 _ (Const64 [c])) + // match: (Rsh8Ux64 _ (Const64 [c])) // cond: uint64(c) >= 8 // result: (Const8 [0]) for { @@ -18796,7 +13914,7 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { v.AuxInt = 0 return true } - // match: (Rsh8Ux64 (Rsh8Ux64 x (Const64 [c])) (Const64 [d])) + // match: (Rsh8Ux64 (Rsh8Ux64 x (Const64 [c])) (Const64 [d])) // cond: !uaddOvf(c,d) // result: (Rsh8Ux64 x (Const64 [c+d])) for { @@ -18869,7 +13987,7 @@ func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool { func rewriteValuegeneric_OpRsh8Ux8(v *Value) bool { b := v.Block _ = b - // match: (Rsh8Ux8 x (Const8 [c])) + // match: (Rsh8Ux8 x (Const8 [c])) // cond: // result: (Rsh8Ux64 x (Const64 [int64(uint8(c))])) for { @@ -18887,7 +14005,7 @@ func rewriteValuegeneric_OpRsh8Ux8(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh8Ux8 (Const8 [0]) _) + // match: (Rsh8Ux8 (Const8 [0]) _) // cond: // result: (Const8 [0]) for { @@ -18907,7 +14025,7 @@ func rewriteValuegeneric_OpRsh8Ux8(v *Value) bool { func rewriteValuegeneric_OpRsh8x16(v *Value) bool { b := v.Block _ = b - // match: (Rsh8x16 x (Const16 [c])) + // match: (Rsh8x16 x (Const16 [c])) // cond: // result: (Rsh8x64 x (Const64 [int64(uint16(c))])) for { @@ -18925,7 +14043,7 @@ func rewriteValuegeneric_OpRsh8x16(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh8x16 (Const8 [0]) _) + // match: (Rsh8x16 (Const8 [0]) _) // cond: // result: (Const8 [0]) for { @@ -18945,7 +14063,7 @@ func rewriteValuegeneric_OpRsh8x16(v *Value) bool { func rewriteValuegeneric_OpRsh8x32(v *Value) bool { b := v.Block _ = b - // match: (Rsh8x32 x (Const32 [c])) + // match: (Rsh8x32 x (Const32 [c])) // cond: // result: (Rsh8x64 x (Const64 [int64(uint32(c))])) for { @@ -18963,7 +14081,7 @@ func rewriteValuegeneric_OpRsh8x32(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh8x32 (Const8 [0]) _) + // match: (Rsh8x32 (Const8 [0]) _) // cond: // result: (Const8 [0]) for { @@ -18983,7 +14101,7 @@ func rewriteValuegeneric_OpRsh8x32(v *Value) bool { func rewriteValuegeneric_OpRsh8x64(v *Value) bool { b := v.Block _ = b - // match: (Rsh8x64 (Const8 [c]) (Const64 [d])) + // match: (Rsh8x64 (Const8 [c]) (Const64 [d])) // cond: // result: (Const8 [int64(int8(c) >> uint64(d))]) for { @@ -19001,7 +14119,7 @@ func rewriteValuegeneric_OpRsh8x64(v *Value) bool { v.AuxInt = int64(int8(c) >> uint64(d)) return true } - // match: (Rsh8x64 x (Const64 [0])) + // match: (Rsh8x64 x (Const64 [0])) // cond: // result: x for { @@ -19018,7 +14136,7 @@ func rewriteValuegeneric_OpRsh8x64(v *Value) bool { v.AddArg(x) return true } - // match: (Rsh8x64 (Const8 [0]) _) + // match: (Rsh8x64 (Const8 [0]) _) // cond: // result: (Const8 [0]) for { @@ -19033,7 +14151,7 @@ func rewriteValuegeneric_OpRsh8x64(v *Value) bool { v.AuxInt = 0 return true } - // match: (Rsh8x64 (Rsh8x64 x (Const64 [c])) (Const64 [d])) + // match: (Rsh8x64 (Rsh8x64 
x (Const64 [c])) (Const64 [d])) // cond: !uaddOvf(c,d) // result: (Rsh8x64 x (Const64 [c+d])) for { @@ -19068,7 +14186,7 @@ func rewriteValuegeneric_OpRsh8x64(v *Value) bool { func rewriteValuegeneric_OpRsh8x8(v *Value) bool { b := v.Block _ = b - // match: (Rsh8x8 x (Const8 [c])) + // match: (Rsh8x8 x (Const8 [c])) // cond: // result: (Rsh8x64 x (Const64 [int64(uint8(c))])) for { @@ -19086,7 +14204,7 @@ func rewriteValuegeneric_OpRsh8x8(v *Value) bool { v.AddArg(v0) return true } - // match: (Rsh8x8 (Const8 [0]) _) + // match: (Rsh8x8 (Const8 [0]) _) // cond: // result: (Const8 [0]) for { @@ -19227,7 +14345,7 @@ func rewriteValuegeneric_OpSignExt32to64(v *Value) bool { return false } func rewriteValuegeneric_OpSignExt8to16(v *Value) bool { - // match: (SignExt8to16 (Const8 [c])) + // match: (SignExt8to16 (Const8 [c])) // cond: // result: (Const16 [int64( int8(c))]) for { @@ -19240,7 +14358,7 @@ func rewriteValuegeneric_OpSignExt8to16(v *Value) bool { v.AuxInt = int64(int8(c)) return true } - // match: (SignExt8to16 (Trunc16to8 x:(Rsh16x64 _ (Const64 [s])))) + // match: (SignExt8to16 (Trunc16to8 x:(Rsh16x64 _ (Const64 [s])))) // cond: s >= 8 // result: x for { @@ -19268,7 +14386,7 @@ func rewriteValuegeneric_OpSignExt8to16(v *Value) bool { return false } func rewriteValuegeneric_OpSignExt8to32(v *Value) bool { - // match: (SignExt8to32 (Const8 [c])) + // match: (SignExt8to32 (Const8 [c])) // cond: // result: (Const32 [int64( int8(c))]) for { @@ -19281,7 +14399,7 @@ func rewriteValuegeneric_OpSignExt8to32(v *Value) bool { v.AuxInt = int64(int8(c)) return true } - // match: (SignExt8to32 (Trunc32to8 x:(Rsh32x64 _ (Const64 [s])))) + // match: (SignExt8to32 (Trunc32to8 x:(Rsh32x64 _ (Const64 [s])))) // cond: s >= 24 // result: x for { @@ -19309,7 +14427,7 @@ func rewriteValuegeneric_OpSignExt8to32(v *Value) bool { return false } func rewriteValuegeneric_OpSignExt8to64(v *Value) bool { - // match: (SignExt8to64 (Const8 [c])) + // match: (SignExt8to64 (Const8 [c])) // cond: // result: (Const64 [int64( int8(c))]) for { @@ -19322,7 +14440,7 @@ func rewriteValuegeneric_OpSignExt8to64(v *Value) bool { v.AuxInt = int64(int8(c)) return true } - // match: (SignExt8to64 (Trunc64to8 x:(Rsh64x64 _ (Const64 [s])))) + // match: (SignExt8to64 (Trunc64to8 x:(Rsh64x64 _ (Const64 [s])))) // cond: s >= 56 // result: x for { @@ -20167,7 +15285,7 @@ func rewriteValuegeneric_OpStructSelect(v *Value) bool { func rewriteValuegeneric_OpSub16(v *Value) bool { b := v.Block _ = b - // match: (Sub16 (Const16 [c]) (Const16 [d])) + // match: (Sub16 (Const16 [c]) (Const16 [d])) // cond: // result: (Const16 [int64(int16(c-d))]) for { @@ -20236,24 +15354,6 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { v.AddArg(y) return true } - // match: (Sub16 (Add16 y x) x) - // cond: - // result: y - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - y := v_0.Args[0] - x := v_0.Args[1] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } // match: (Sub16 (Add16 x y) y) // cond: // result: x @@ -20272,24 +15372,6 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { v.AddArg(x) return true } - // match: (Sub16 (Add16 y x) y) - // cond: - // result: x - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd16 { - break - } - y := v_0.Args[0] - x := v_0.Args[1] - if y != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (Sub16 x (Sub16 i:(Const16 ) z)) // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (Sub16 (Add16 x z) i) 
@@ -20407,7 +15489,7 @@ func rewriteValuegeneric_OpSub16(v *Value) bool { func rewriteValuegeneric_OpSub32(v *Value) bool { b := v.Block _ = b - // match: (Sub32 (Const32 [c]) (Const32 [d])) + // match: (Sub32 (Const32 [c]) (Const32 [d])) // cond: // result: (Const32 [int64(int32(c-d))]) for { @@ -20476,24 +15558,6 @@ func rewriteValuegeneric_OpSub32(v *Value) bool { v.AddArg(y) return true } - // match: (Sub32 (Add32 y x) x) - // cond: - // result: y - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd32 { - break - } - y := v_0.Args[0] - x := v_0.Args[1] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } // match: (Sub32 (Add32 x y) y) // cond: // result: x @@ -20512,24 +15576,6 @@ func rewriteValuegeneric_OpSub32(v *Value) bool { v.AddArg(x) return true } - // match: (Sub32 (Add32 y x) y) - // cond: - // result: x - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd32 { - break - } - y := v_0.Args[0] - x := v_0.Args[1] - if y != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (Sub32 x (Sub32 i:(Const32 ) z)) // cond: (z.Op != OpConst32 && x.Op != OpConst32) // result: (Sub32 (Add32 x z) i) @@ -20685,7 +15731,7 @@ func rewriteValuegeneric_OpSub32F(v *Value) bool { func rewriteValuegeneric_OpSub64(v *Value) bool { b := v.Block _ = b - // match: (Sub64 (Const64 [c]) (Const64 [d])) + // match: (Sub64 (Const64 [c]) (Const64 [d])) // cond: // result: (Const64 [c-d]) for { @@ -20754,24 +15800,6 @@ func rewriteValuegeneric_OpSub64(v *Value) bool { v.AddArg(y) return true } - // match: (Sub64 (Add64 y x) x) - // cond: - // result: y - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - y := v_0.Args[0] - x := v_0.Args[1] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } // match: (Sub64 (Add64 x y) y) // cond: // result: x @@ -20790,24 +15818,6 @@ func rewriteValuegeneric_OpSub64(v *Value) bool { v.AddArg(x) return true } - // match: (Sub64 (Add64 y x) y) - // cond: - // result: x - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd64 { - break - } - y := v_0.Args[0] - x := v_0.Args[1] - if y != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (Sub64 x (Sub64 i:(Const64 ) z)) // cond: (z.Op != OpConst64 && x.Op != OpConst64) // result: (Sub64 (Add64 x z) i) @@ -20963,7 +15973,7 @@ func rewriteValuegeneric_OpSub64F(v *Value) bool { func rewriteValuegeneric_OpSub8(v *Value) bool { b := v.Block _ = b - // match: (Sub8 (Const8 [c]) (Const8 [d])) + // match: (Sub8 (Const8 [c]) (Const8 [d])) // cond: // result: (Const8 [int64(int8(c-d))]) for { @@ -20981,7 +15991,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { v.AuxInt = int64(int8(c - d)) return true } - // match: (Sub8 x (Const8 [c])) + // match: (Sub8 x (Const8 [c])) // cond: x.Op != OpConst8 // result: (Add8 (Const8 [int64(int8(-c))]) x) for { @@ -21002,7 +16012,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { v.AddArg(x) return true } - // match: (Sub8 x x) + // match: (Sub8 x x) // cond: // result: (Const8 [0]) for { @@ -21014,7 +16024,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { v.AuxInt = 0 return true } - // match: (Sub8 (Add8 x y) x) + // match: (Sub8 (Add8 x y) x) // cond: // result: y for { @@ -21032,25 +16042,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { v.AddArg(y) return true } - // match: (Sub8 (Add8 y x) x) - // cond: - // result: y - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - y := 
v_0.Args[0] - x := v_0.Args[1] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Sub8 (Add8 x y) y) + // match: (Sub8 (Add8 x y) y) // cond: // result: x for { @@ -21068,25 +16060,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { v.AddArg(x) return true } - // match: (Sub8 (Add8 y x) y) - // cond: - // result: x - for { - v_0 := v.Args[0] - if v_0.Op != OpAdd8 { - break - } - y := v_0.Args[0] - x := v_0.Args[1] - if y != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Sub8 x (Sub8 i:(Const8 ) z)) + // match: (Sub8 x (Sub8 i:(Const8 ) z)) // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Sub8 (Add8 x z) i) for { @@ -21112,7 +16086,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { v.AddArg(i) return true } - // match: (Sub8 x (Sub8 z i:(Const8 ))) + // match: (Sub8 x (Sub8 z i:(Const8 ))) // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Add8 i (Sub8 x z)) for { @@ -21138,7 +16112,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { v.AddArg(v0) return true } - // match: (Sub8 (Const8 [c]) (Sub8 x (Const8 [d]))) + // match: (Sub8 (Const8 [c]) (Sub8 x (Const8 [d]))) // cond: // result: (Sub8 (Const8 [int64(int8(c+d))]) x) for { @@ -21168,7 +16142,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { v.AddArg(x) return true } - // match: (Sub8 (Const8 [c]) (Sub8 (Const8 [d]) x)) + // match: (Sub8 (Const8 [c]) (Sub8 (Const8 [d]) x)) // cond: // result: (Add8 (Const8 [int64(int8(c-d))]) x) for { @@ -21201,7 +16175,7 @@ func rewriteValuegeneric_OpSub8(v *Value) bool { return false } func rewriteValuegeneric_OpTrunc16to8(v *Value) bool { - // match: (Trunc16to8 (Const16 [c])) + // match: (Trunc16to8 (Const16 [c])) // cond: // result: (Const8 [int64(int8(c))]) for { @@ -21214,7 +16188,7 @@ func rewriteValuegeneric_OpTrunc16to8(v *Value) bool { v.AuxInt = int64(int8(c)) return true } - // match: (Trunc16to8 (ZeroExt8to16 x)) + // match: (Trunc16to8 (ZeroExt8to16 x)) // cond: // result: x for { @@ -21228,7 +16202,7 @@ func rewriteValuegeneric_OpTrunc16to8(v *Value) bool { v.AddArg(x) return true } - // match: (Trunc16to8 (SignExt8to16 x)) + // match: (Trunc16to8 (SignExt8to16 x)) // cond: // result: x for { @@ -21242,7 +16216,7 @@ func rewriteValuegeneric_OpTrunc16to8(v *Value) bool { v.AddArg(x) return true } - // match: (Trunc16to8 (And16 (Const16 [y]) x)) + // match: (Trunc16to8 (And16 (Const16 [y]) x)) // cond: y&0xFF == 0xFF // result: (Trunc16to8 x) for { @@ -21263,27 +16237,6 @@ func rewriteValuegeneric_OpTrunc16to8(v *Value) bool { v.AddArg(x) return true } - // match: (Trunc16to8 (And16 x (Const16 [y]))) - // cond: y&0xFF == 0xFF - // result: (Trunc16to8 x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd16 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { - break - } - y := v_0_1.AuxInt - if !(y&0xFF == 0xFF) { - break - } - v.reset(OpTrunc16to8) - v.AddArg(x) - return true - } return false } func rewriteValuegeneric_OpTrunc32to16(v *Value) bool { @@ -21300,7 +16253,7 @@ func rewriteValuegeneric_OpTrunc32to16(v *Value) bool { v.AuxInt = int64(int16(c)) return true } - // match: (Trunc32to16 (ZeroExt8to32 x)) + // match: (Trunc32to16 (ZeroExt8to32 x)) // cond: // result: (ZeroExt8to16 x) for { @@ -21327,7 +16280,7 @@ func rewriteValuegeneric_OpTrunc32to16(v *Value) bool { v.AddArg(x) return true } - // match: (Trunc32to16 (SignExt8to32 x)) + // match: (Trunc32to16 (SignExt8to32 x)) // cond: // result: 
(SignExt8to16 x) for { @@ -21362,33 +16315,12 @@ func rewriteValuegeneric_OpTrunc32to16(v *Value) bool { if v_0.Op != OpAnd32 { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - y := v_0_0.AuxInt - x := v_0.Args[1] - if !(y&0xFFFF == 0xFFFF) { - break - } - v.reset(OpTrunc32to16) - v.AddArg(x) - return true - } - // match: (Trunc32to16 (And32 x (Const32 [y]))) - // cond: y&0xFFFF == 0xFFFF - // result: (Trunc32to16 x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - y := v_0_1.AuxInt + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpConst32 { + break + } + y := v_0_0.AuxInt + x := v_0.Args[1] if !(y&0xFFFF == 0xFFFF) { break } @@ -21399,7 +16331,7 @@ func rewriteValuegeneric_OpTrunc32to16(v *Value) bool { return false } func rewriteValuegeneric_OpTrunc32to8(v *Value) bool { - // match: (Trunc32to8 (Const32 [c])) + // match: (Trunc32to8 (Const32 [c])) // cond: // result: (Const8 [int64(int8(c))]) for { @@ -21412,7 +16344,7 @@ func rewriteValuegeneric_OpTrunc32to8(v *Value) bool { v.AuxInt = int64(int8(c)) return true } - // match: (Trunc32to8 (ZeroExt8to32 x)) + // match: (Trunc32to8 (ZeroExt8to32 x)) // cond: // result: x for { @@ -21426,7 +16358,7 @@ func rewriteValuegeneric_OpTrunc32to8(v *Value) bool { v.AddArg(x) return true } - // match: (Trunc32to8 (SignExt8to32 x)) + // match: (Trunc32to8 (SignExt8to32 x)) // cond: // result: x for { @@ -21440,7 +16372,7 @@ func rewriteValuegeneric_OpTrunc32to8(v *Value) bool { v.AddArg(x) return true } - // match: (Trunc32to8 (And32 (Const32 [y]) x)) + // match: (Trunc32to8 (And32 (Const32 [y]) x)) // cond: y&0xFF == 0xFF // result: (Trunc32to8 x) for { @@ -21461,27 +16393,6 @@ func rewriteValuegeneric_OpTrunc32to8(v *Value) bool { v.AddArg(x) return true } - // match: (Trunc32to8 (And32 x (Const32 [y]))) - // cond: y&0xFF == 0xFF - // result: (Trunc32to8 x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd32 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { - break - } - y := v_0_1.AuxInt - if !(y&0xFF == 0xFF) { - break - } - v.reset(OpTrunc32to8) - v.AddArg(x) - return true - } return false } func rewriteValuegeneric_OpTrunc64to16(v *Value) bool { @@ -21498,7 +16409,7 @@ func rewriteValuegeneric_OpTrunc64to16(v *Value) bool { v.AuxInt = int64(int16(c)) return true } - // match: (Trunc64to16 (ZeroExt8to64 x)) + // match: (Trunc64to16 (ZeroExt8to64 x)) // cond: // result: (ZeroExt8to16 x) for { @@ -21525,7 +16436,7 @@ func rewriteValuegeneric_OpTrunc64to16(v *Value) bool { v.AddArg(x) return true } - // match: (Trunc64to16 (SignExt8to64 x)) + // match: (Trunc64to16 (SignExt8to64 x)) // cond: // result: (SignExt8to16 x) for { @@ -21573,27 +16484,6 @@ func rewriteValuegeneric_OpTrunc64to16(v *Value) bool { v.AddArg(x) return true } - // match: (Trunc64to16 (And64 x (Const64 [y]))) - // cond: y&0xFFFF == 0xFFFF - // result: (Trunc64to16 x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - y := v_0_1.AuxInt - if !(y&0xFFFF == 0xFFFF) { - break - } - v.reset(OpTrunc64to16) - v.AddArg(x) - return true - } return false } func rewriteValuegeneric_OpTrunc64to32(v *Value) bool { @@ -21610,7 +16500,7 @@ func rewriteValuegeneric_OpTrunc64to32(v *Value) bool { v.AuxInt = int64(int32(c)) return true } - // match: (Trunc64to32 (ZeroExt8to64 x)) + // match: (Trunc64to32 (ZeroExt8to64 x)) // cond: // result: 
(ZeroExt8to32 x) for { @@ -21650,7 +16540,7 @@ func rewriteValuegeneric_OpTrunc64to32(v *Value) bool { v.AddArg(x) return true } - // match: (Trunc64to32 (SignExt8to64 x)) + // match: (Trunc64to32 (SignExt8to64 x)) // cond: // result: (SignExt8to32 x) for { @@ -21711,31 +16601,10 @@ func rewriteValuegeneric_OpTrunc64to32(v *Value) bool { v.AddArg(x) return true } - // match: (Trunc64to32 (And64 x (Const64 [y]))) - // cond: y&0xFFFFFFFF == 0xFFFFFFFF - // result: (Trunc64to32 x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - y := v_0_1.AuxInt - if !(y&0xFFFFFFFF == 0xFFFFFFFF) { - break - } - v.reset(OpTrunc64to32) - v.AddArg(x) - return true - } return false } func rewriteValuegeneric_OpTrunc64to8(v *Value) bool { - // match: (Trunc64to8 (Const64 [c])) + // match: (Trunc64to8 (Const64 [c])) // cond: // result: (Const8 [int64(int8(c))]) for { @@ -21748,7 +16617,7 @@ func rewriteValuegeneric_OpTrunc64to8(v *Value) bool { v.AuxInt = int64(int8(c)) return true } - // match: (Trunc64to8 (ZeroExt8to64 x)) + // match: (Trunc64to8 (ZeroExt8to64 x)) // cond: // result: x for { @@ -21762,7 +16631,7 @@ func rewriteValuegeneric_OpTrunc64to8(v *Value) bool { v.AddArg(x) return true } - // match: (Trunc64to8 (SignExt8to64 x)) + // match: (Trunc64to8 (SignExt8to64 x)) // cond: // result: x for { @@ -21776,7 +16645,7 @@ func rewriteValuegeneric_OpTrunc64to8(v *Value) bool { v.AddArg(x) return true } - // match: (Trunc64to8 (And64 (Const64 [y]) x)) + // match: (Trunc64to8 (And64 (Const64 [y]) x)) // cond: y&0xFF == 0xFF // result: (Trunc64to8 x) for { @@ -21797,33 +16666,12 @@ func rewriteValuegeneric_OpTrunc64to8(v *Value) bool { v.AddArg(x) return true } - // match: (Trunc64to8 (And64 x (Const64 [y]))) - // cond: y&0xFF == 0xFF - // result: (Trunc64to8 x) - for { - v_0 := v.Args[0] - if v_0.Op != OpAnd64 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { - break - } - y := v_0_1.AuxInt - if !(y&0xFF == 0xFF) { - break - } - v.reset(OpTrunc64to8) - v.AddArg(x) - return true - } return false } func rewriteValuegeneric_OpXor16(v *Value) bool { b := v.Block _ = b - // match: (Xor16 (Const16 [c]) (Const16 [d])) + // match: (Xor16 (Const16 [c]) (Const16 [d])) // cond: // result: (Const16 [int64(int16(c^d))]) for { @@ -21841,22 +16689,25 @@ func rewriteValuegeneric_OpXor16(v *Value) bool { v.AuxInt = int64(int16(c ^ d)) return true } - // match: (Xor16 (Const16 [d]) (Const16 [c])) - // cond: - // result: (Const16 [int64(int16(c^d))]) + // match: (Xor16 x (Const16 [c])) + // cond: x.Op != OpConst16 + // result: (Xor16 (Const16 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - d := v_0.AuxInt + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst16 { break } + t := v_1.Type c := v_1.AuxInt - v.reset(OpConst16) - v.AuxInt = int64(int16(c ^ d)) + if !(x.Op != OpConst16) { + break + } + v.reset(OpXor16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = c + v.AddArg(v0) + v.AddArg(x) return true } // match: (Xor16 x x) @@ -21888,23 +16739,6 @@ func rewriteValuegeneric_OpXor16(v *Value) bool { v.AddArg(x) return true } - // match: (Xor16 x (Const16 [0])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst16 { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (Xor16 x (Xor16 x y)) // cond: // result: y @@ -21959,241 +16793,90 @@ func 
rewriteValuegeneric_OpXor16(v *Value) bool { v.AddArg(y) return true } - // match: (Xor16 (Xor16 y x) x) + // match: (Xor16 (Xor16 x y) y) // cond: - // result: y + // result: x for { v_0 := v.Args[0] if v_0.Op != OpXor16 { break } - y := v_0.Args[0] - x := v_0.Args[1] - if x != v.Args[1] { + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { break } v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Xor16 (Xor16 i:(Const16 ) z) x) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Xor16 i (Xor16 z x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpXor16 { - break - } - i := v_0.Args[0] - if i.Op != OpConst16 { - break - } - t := i.Type - z := v_0.Args[1] - x := v.Args[1] - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpXor16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor16 (Xor16 z i:(Const16 )) x) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Xor16 i (Xor16 z x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpXor16 { - break - } - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst16 { - break - } - t := i.Type - x := v.Args[1] - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpXor16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor16 x (Xor16 i:(Const16 ) z)) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Xor16 i (Xor16 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor16 { - break - } - i := v_1.Args[0] - if i.Op != OpConst16 { - break - } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpXor16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor16 x (Xor16 z i:(Const16 ))) - // cond: (z.Op != OpConst16 && x.Op != OpConst16) - // result: (Xor16 i (Xor16 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor16 { - break - } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst16 { - break - } - t := i.Type - if !(z.Op != OpConst16 && x.Op != OpConst16) { - break - } - v.reset(OpXor16) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor16, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor16 (Const16 [c]) (Xor16 (Const16 [d]) x)) - // cond: - // result: (Xor16 (Const16 [int64(int16(c^d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpXor16 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst16 { - break - } - if v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - x := v_1.Args[1] - v.reset(OpXor16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c ^ d)) - v.AddArg(v0) + v.Type = x.Type v.AddArg(x) return true } - // match: (Xor16 (Const16 [c]) (Xor16 x (Const16 [d]))) - // cond: - // result: (Xor16 (Const16 [int64(int16(c^d))]) x) + // match: (Xor16 x l:(Xor16 _ _)) + // cond: (x.Op != OpXor16 && x.Op != OpConst16) + // result: (Xor16 l x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst16 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpXor16 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst16 { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpXor16 { break } - if v_1_1.Type != t { + if !(x.Op != 
OpXor16 && x.Op != OpConst16) { break } - d := v_1_1.AuxInt v.reset(OpXor16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c ^ d)) - v.AddArg(v0) + v.AddArg(l) v.AddArg(x) return true } - // match: (Xor16 (Xor16 (Const16 [d]) x) (Const16 [c])) - // cond: - // result: (Xor16 (Const16 [int64(int16(c^d))]) x) + // match: (Xor16 (Xor16 i:(Const16 ) z) x) + // cond: (z.Op != OpConst16 && x.Op != OpConst16) + // result: (Xor16 i (Xor16 z x)) for { v_0 := v.Args[0] if v_0.Op != OpXor16 { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst16 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst16 { + i := v_0.Args[0] + if i.Op != OpConst16 { break } - if v_1.Type != t { + t := i.Type + z := v_0.Args[1] + x := v.Args[1] + if !(z.Op != OpConst16 && x.Op != OpConst16) { break } - c := v_1.AuxInt v.reset(OpXor16) - v0 := b.NewValue0(v.Pos, OpConst16, t) - v0.AuxInt = int64(int16(c ^ d)) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpXor16, t) + v0.AddArg(z) + v0.AddArg(x) v.AddArg(v0) - v.AddArg(x) return true } - // match: (Xor16 (Xor16 x (Const16 [d])) (Const16 [c])) + // match: (Xor16 (Const16 [c]) (Xor16 (Const16 [d]) x)) // cond: // result: (Xor16 (Const16 [int64(int16(c^d))]) x) for { v_0 := v.Args[0] - if v_0.Op != OpXor16 { + if v_0.Op != OpConst16 { break } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst16 { + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpXor16 { break } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst16 { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst16 { break } - if v_1.Type != t { + if v_1_0.Type != t { break } - c := v_1.AuxInt + d := v_1_0.AuxInt + x := v_1.Args[1] v.reset(OpXor16) v0 := b.NewValue0(v.Pos, OpConst16, t) v0.AuxInt = int64(int16(c ^ d)) @@ -22206,7 +16889,7 @@ func rewriteValuegeneric_OpXor16(v *Value) bool { func rewriteValuegeneric_OpXor32(v *Value) bool { b := v.Block _ = b - // match: (Xor32 (Const32 [c]) (Const32 [d])) + // match: (Xor32 (Const32 [c]) (Const32 [d])) // cond: // result: (Const32 [int64(int32(c^d))]) for { @@ -22224,22 +16907,25 @@ func rewriteValuegeneric_OpXor32(v *Value) bool { v.AuxInt = int64(int32(c ^ d)) return true } - // match: (Xor32 (Const32 [d]) (Const32 [c])) - // cond: - // result: (Const32 [int64(int32(c^d))]) + // match: (Xor32 x (Const32 [c])) + // cond: x.Op != OpConst32 + // result: (Xor32 (Const32 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - d := v_0.AuxInt + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst32 { break } + t := v_1.Type c := v_1.AuxInt - v.reset(OpConst32) - v.AuxInt = int64(int32(c ^ d)) + if !(x.Op != OpConst32) { + break + } + v.reset(OpXor32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = c + v.AddArg(v0) + v.AddArg(x) return true } // match: (Xor32 x x) @@ -22271,23 +16957,6 @@ func rewriteValuegeneric_OpXor32(v *Value) bool { v.AddArg(x) return true } - // match: (Xor32 x (Const32 [0])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (Xor32 x (Xor32 x y)) // cond: // result: y @@ -22342,241 +17011,90 @@ func rewriteValuegeneric_OpXor32(v *Value) bool { v.AddArg(y) return true } - // match: (Xor32 (Xor32 y x) x) + // match: (Xor32 (Xor32 x y) y) // cond: - // result: y - for { - v_0 := v.Args[0] - if v_0.Op != OpXor32 { - break - } - y 
:= v_0.Args[0] - x := v_0.Args[1] - if x != v.Args[1] { - break - } - v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: (Xor32 (Xor32 i:(Const32 ) z) x) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Xor32 i (Xor32 z x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpXor32 { - break - } - i := v_0.Args[0] - if i.Op != OpConst32 { - break - } - t := i.Type - z := v_0.Args[1] - x := v.Args[1] - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpXor32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor32 (Xor32 z i:(Const32 )) x) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Xor32 i (Xor32 z x)) + // result: x for { v_0 := v.Args[0] if v_0.Op != OpXor32 { break - } - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst32 { - break - } - t := i.Type - x := v.Args[1] - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpXor32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor32 x (Xor32 i:(Const32 ) z)) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Xor32 i (Xor32 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor32 { - break - } - i := v_1.Args[0] - if i.Op != OpConst32 { - break - } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpXor32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor32 x (Xor32 z i:(Const32 ))) - // cond: (z.Op != OpConst32 && x.Op != OpConst32) - // result: (Xor32 i (Xor32 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor32 { - break - } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst32 { - break - } - t := i.Type - if !(z.Op != OpConst32 && x.Op != OpConst32) { - break - } - v.reset(OpXor32) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor32, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor32 (Const32 [c]) (Xor32 (Const32 [d]) x)) - // cond: - // result: (Xor32 (Const32 [int64(int32(c^d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpXor32 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst32 { - break - } - if v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - x := v_1.Args[1] - v.reset(OpXor32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c ^ d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Xor32 (Const32 [c]) (Xor32 x (Const32 [d]))) - // cond: - // result: (Xor32 (Const32 [int64(int32(c^d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst32 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpXor32 { + } + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { break } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst32 { + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (Xor32 x l:(Xor32 _ _)) + // cond: (x.Op != OpXor32 && x.Op != OpConst32) + // result: (Xor32 l x) + for { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpXor32 { break } - if v_1_1.Type != t { + if !(x.Op != OpXor32 && x.Op != OpConst32) { break } - d := v_1_1.AuxInt v.reset(OpXor32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c ^ d)) - 
v.AddArg(v0) + v.AddArg(l) v.AddArg(x) return true } - // match: (Xor32 (Xor32 (Const32 [d]) x) (Const32 [c])) - // cond: - // result: (Xor32 (Const32 [int64(int32(c^d))]) x) + // match: (Xor32 (Xor32 i:(Const32 ) z) x) + // cond: (z.Op != OpConst32 && x.Op != OpConst32) + // result: (Xor32 i (Xor32 z x)) for { v_0 := v.Args[0] if v_0.Op != OpXor32 { break } - v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst32 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst32 { + i := v_0.Args[0] + if i.Op != OpConst32 { break } - if v_1.Type != t { + t := i.Type + z := v_0.Args[1] + x := v.Args[1] + if !(z.Op != OpConst32 && x.Op != OpConst32) { break } - c := v_1.AuxInt v.reset(OpXor32) - v0 := b.NewValue0(v.Pos, OpConst32, t) - v0.AuxInt = int64(int32(c ^ d)) + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpXor32, t) + v0.AddArg(z) + v0.AddArg(x) v.AddArg(v0) - v.AddArg(x) return true } - // match: (Xor32 (Xor32 x (Const32 [d])) (Const32 [c])) + // match: (Xor32 (Const32 [c]) (Xor32 (Const32 [d]) x)) // cond: // result: (Xor32 (Const32 [int64(int32(c^d))]) x) for { v_0 := v.Args[0] - if v_0.Op != OpXor32 { + if v_0.Op != OpConst32 { break } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst32 { + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpXor32 { break } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst32 { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst32 { break } - if v_1.Type != t { + if v_1_0.Type != t { break } - c := v_1.AuxInt + d := v_1_0.AuxInt + x := v_1.Args[1] v.reset(OpXor32) v0 := b.NewValue0(v.Pos, OpConst32, t) v0.AuxInt = int64(int32(c ^ d)) @@ -22589,7 +17107,7 @@ func rewriteValuegeneric_OpXor32(v *Value) bool { func rewriteValuegeneric_OpXor64(v *Value) bool { b := v.Block _ = b - // match: (Xor64 (Const64 [c]) (Const64 [d])) + // match: (Xor64 (Const64 [c]) (Const64 [d])) // cond: // result: (Const64 [c^d]) for { @@ -22607,22 +17125,25 @@ func rewriteValuegeneric_OpXor64(v *Value) bool { v.AuxInt = c ^ d return true } - // match: (Xor64 (Const64 [d]) (Const64 [c])) - // cond: - // result: (Const64 [c^d]) + // match: (Xor64 x (Const64 [c])) + // cond: x.Op != OpConst64 + // result: (Xor64 (Const64 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - d := v_0.AuxInt + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst64 { break } + t := v_1.Type c := v_1.AuxInt - v.reset(OpConst64) - v.AuxInt = c ^ d + if !(x.Op != OpConst64) { + break + } + v.reset(OpXor64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c + v.AddArg(v0) + v.AddArg(x) return true } // match: (Xor64 x x) @@ -22654,23 +17175,6 @@ func rewriteValuegeneric_OpXor64(v *Value) bool { v.AddArg(x) return true } - // match: (Xor64 x (Const64 [0])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } // match: (Xor64 x (Xor64 x y)) // cond: // result: y @@ -22725,241 +17229,90 @@ func rewriteValuegeneric_OpXor64(v *Value) bool { v.AddArg(y) return true } - // match: (Xor64 (Xor64 y x) x) + // match: (Xor64 (Xor64 x y) y) // cond: - // result: y + // result: x for { v_0 := v.Args[0] if v_0.Op != OpXor64 { break } - y := v_0.Args[0] - x := v_0.Args[1] - if x != v.Args[1] { + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { break } v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) - return true - } - // match: 
(Xor64 (Xor64 i:(Const64 ) z) x) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Xor64 i (Xor64 z x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpXor64 { - break - } - i := v_0.Args[0] - if i.Op != OpConst64 { - break - } - t := i.Type - z := v_0.Args[1] - x := v.Args[1] - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpXor64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor64 (Xor64 z i:(Const64 )) x) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Xor64 i (Xor64 z x)) - for { - v_0 := v.Args[0] - if v_0.Op != OpXor64 { - break - } - z := v_0.Args[0] - i := v_0.Args[1] - if i.Op != OpConst64 { - break - } - t := i.Type - x := v.Args[1] - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpXor64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor64 x (Xor64 i:(Const64 ) z)) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Xor64 i (Xor64 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor64 { - break - } - i := v_1.Args[0] - if i.Op != OpConst64 { - break - } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpXor64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor64 x (Xor64 z i:(Const64 ))) - // cond: (z.Op != OpConst64 && x.Op != OpConst64) - // result: (Xor64 i (Xor64 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor64 { - break - } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst64 { - break - } - t := i.Type - if !(z.Op != OpConst64 && x.Op != OpConst64) { - break - } - v.reset(OpXor64) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor64, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor64 (Const64 [c]) (Xor64 (Const64 [d]) x)) - // cond: - // result: (Xor64 (Const64 [c^d]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpXor64 { - break - } - v_1_0 := v_1.Args[0] - if v_1_0.Op != OpConst64 { - break - } - if v_1_0.Type != t { - break - } - d := v_1_0.AuxInt - x := v_1.Args[1] - v.reset(OpXor64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c ^ d - v.AddArg(v0) + v.Type = x.Type v.AddArg(x) return true } - // match: (Xor64 (Const64 [c]) (Xor64 x (Const64 [d]))) - // cond: - // result: (Xor64 (Const64 [c^d]) x) + // match: (Xor64 x l:(Xor64 _ _)) + // cond: (x.Op != OpXor64 && x.Op != OpConst64) + // result: (Xor64 l x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst64 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpXor64 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst64 { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpXor64 { break } - if v_1_1.Type != t { + if !(x.Op != OpXor64 && x.Op != OpConst64) { break } - d := v_1_1.AuxInt v.reset(OpXor64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c ^ d - v.AddArg(v0) + v.AddArg(l) v.AddArg(x) return true } - // match: (Xor64 (Xor64 (Const64 [d]) x) (Const64 [c])) - // cond: - // result: (Xor64 (Const64 [c^d]) x) + // match: (Xor64 (Xor64 i:(Const64 ) z) x) + // cond: (z.Op != OpConst64 && x.Op != OpConst64) + // result: (Xor64 i (Xor64 z x)) for { v_0 := v.Args[0] if v_0.Op != OpXor64 { break } 
- v_0_0 := v_0.Args[0] - if v_0_0.Op != OpConst64 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + i := v_0.Args[0] + if i.Op != OpConst64 { break } - if v_1.Type != t { + t := i.Type + z := v_0.Args[1] + x := v.Args[1] + if !(z.Op != OpConst64 && x.Op != OpConst64) { break } - c := v_1.AuxInt v.reset(OpXor64) - v0 := b.NewValue0(v.Pos, OpConst64, t) - v0.AuxInt = c ^ d + v.AddArg(i) + v0 := b.NewValue0(v.Pos, OpXor64, t) + v0.AddArg(z) + v0.AddArg(x) v.AddArg(v0) - v.AddArg(x) return true } - // match: (Xor64 (Xor64 x (Const64 [d])) (Const64 [c])) + // match: (Xor64 (Const64 [c]) (Xor64 (Const64 [d]) x)) // cond: // result: (Xor64 (Const64 [c^d]) x) for { v_0 := v.Args[0] - if v_0.Op != OpXor64 { + if v_0.Op != OpConst64 { break } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst64 { + t := v_0.Type + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpXor64 { break } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpConst64 { break } - if v_1.Type != t { + if v_1_0.Type != t { break } - c := v_1.AuxInt + d := v_1_0.AuxInt + x := v_1.Args[1] v.reset(OpXor64) v0 := b.NewValue0(v.Pos, OpConst64, t) v0.AuxInt = c ^ d @@ -22972,7 +17325,7 @@ func rewriteValuegeneric_OpXor64(v *Value) bool { func rewriteValuegeneric_OpXor8(v *Value) bool { b := v.Block _ = b - // match: (Xor8 (Const8 [c]) (Const8 [d])) + // match: (Xor8 (Const8 [c]) (Const8 [d])) // cond: // result: (Const8 [int64(int8(c^d))]) for { @@ -22990,25 +17343,28 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { v.AuxInt = int64(int8(c ^ d)) return true } - // match: (Xor8 (Const8 [d]) (Const8 [c])) - // cond: - // result: (Const8 [int64(int8(c^d))]) + // match: (Xor8 x (Const8 [c])) + // cond: x.Op != OpConst8 + // result: (Xor8 (Const8 [c]) x) for { - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - d := v_0.AuxInt + x := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpConst8 { break } + t := v_1.Type c := v_1.AuxInt - v.reset(OpConst8) - v.AuxInt = int64(int8(c ^ d)) + if !(x.Op != OpConst8) { + break + } + v.reset(OpXor8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = c + v.AddArg(v0) + v.AddArg(x) return true } - // match: (Xor8 x x) + // match: (Xor8 x x) // cond: // result: (Const8 [0]) for { @@ -23020,7 +17376,7 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { v.AuxInt = 0 return true } - // match: (Xor8 (Const8 [0]) x) + // match: (Xor8 (Const8 [0]) x) // cond: // result: x for { @@ -23037,24 +17393,7 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { v.AddArg(x) return true } - // match: (Xor8 x (Const8 [0])) - // cond: - // result: x - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.AuxInt != 0 { - break - } - v.reset(OpCopy) - v.Type = x.Type - v.AddArg(x) - return true - } - // match: (Xor8 x (Xor8 x y)) + // match: (Xor8 x (Xor8 x y)) // cond: // result: y for { @@ -23072,7 +17411,7 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { v.AddArg(y) return true } - // match: (Xor8 x (Xor8 y x)) + // match: (Xor8 x (Xor8 y x)) // cond: // result: y for { @@ -23090,7 +17429,7 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { v.AddArg(y) return true } - // match: (Xor8 (Xor8 x y) x) + // match: (Xor8 (Xor8 x y) x) // cond: // result: y for { @@ -23108,51 +17447,42 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { v.AddArg(y) return true } - // match: (Xor8 (Xor8 y x) x) + // match: (Xor8 (Xor8 x y) y) // 
cond: - // result: y + // result: x for { v_0 := v.Args[0] if v_0.Op != OpXor8 { break } - y := v_0.Args[0] - x := v_0.Args[1] - if x != v.Args[1] { + x := v_0.Args[0] + y := v_0.Args[1] + if y != v.Args[1] { break } v.reset(OpCopy) - v.Type = y.Type - v.AddArg(y) + v.Type = x.Type + v.AddArg(x) return true } - // match: (Xor8 (Xor8 i:(Const8 ) z) x) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Xor8 i (Xor8 z x)) + // match: (Xor8 x l:(Xor8 _ _)) + // cond: (x.Op != OpXor8 && x.Op != OpConst8) + // result: (Xor8 l x) for { - v_0 := v.Args[0] - if v_0.Op != OpXor8 { - break - } - i := v_0.Args[0] - if i.Op != OpConst8 { + x := v.Args[0] + l := v.Args[1] + if l.Op != OpXor8 { break } - t := i.Type - z := v_0.Args[1] - x := v.Args[1] - if !(z.Op != OpConst8 && x.Op != OpConst8) { + if !(x.Op != OpXor8 && x.Op != OpConst8) { break } v.reset(OpXor8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) + v.AddArg(l) + v.AddArg(x) return true } - // match: (Xor8 (Xor8 z i:(Const8 )) x) + // match: (Xor8 (Xor8 i:(Const8 ) z) x) // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Xor8 i (Xor8 z x)) for { @@ -23160,12 +17490,12 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { if v_0.Op != OpXor8 { break } - z := v_0.Args[0] - i := v_0.Args[1] + i := v_0.Args[0] if i.Op != OpConst8 { break } t := i.Type + z := v_0.Args[1] x := v.Args[1] if !(z.Op != OpConst8 && x.Op != OpConst8) { break @@ -23178,59 +17508,7 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { v.AddArg(v0) return true } - // match: (Xor8 x (Xor8 i:(Const8 ) z)) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Xor8 i (Xor8 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor8 { - break - } - i := v_1.Args[0] - if i.Op != OpConst8 { - break - } - t := i.Type - z := v_1.Args[1] - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpXor8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor8 x (Xor8 z i:(Const8 ))) - // cond: (z.Op != OpConst8 && x.Op != OpConst8) - // result: (Xor8 i (Xor8 z x)) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpXor8 { - break - } - z := v_1.Args[0] - i := v_1.Args[1] - if i.Op != OpConst8 { - break - } - t := i.Type - if !(z.Op != OpConst8 && x.Op != OpConst8) { - break - } - v.reset(OpXor8) - v.AddArg(i) - v0 := b.NewValue0(v.Pos, OpXor8, t) - v0.AddArg(z) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Xor8 (Const8 [c]) (Xor8 (Const8 [d]) x)) + // match: (Xor8 (Const8 [c]) (Xor8 (Const8 [d]) x)) // cond: // result: (Xor8 (Const8 [int64(int8(c^d))]) x) for { @@ -23260,96 +17538,6 @@ func rewriteValuegeneric_OpXor8(v *Value) bool { v.AddArg(x) return true } - // match: (Xor8 (Const8 [c]) (Xor8 x (Const8 [d]))) - // cond: - // result: (Xor8 (Const8 [int64(int8(c^d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpConst8 { - break - } - t := v_0.Type - c := v_0.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpXor8 { - break - } - x := v_1.Args[0] - v_1_1 := v_1.Args[1] - if v_1_1.Op != OpConst8 { - break - } - if v_1_1.Type != t { - break - } - d := v_1_1.AuxInt - v.reset(OpXor8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c ^ d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Xor8 (Xor8 (Const8 [d]) x) (Const8 [c])) - // cond: - // result: (Xor8 (Const8 [int64(int8(c^d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpXor8 { - break - } - v_0_0 := 
v_0.Args[0] - if v_0_0.Op != OpConst8 { - break - } - t := v_0_0.Type - d := v_0_0.AuxInt - x := v_0.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpXor8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c ^ d)) - v.AddArg(v0) - v.AddArg(x) - return true - } - // match: (Xor8 (Xor8 x (Const8 [d])) (Const8 [c])) - // cond: - // result: (Xor8 (Const8 [int64(int8(c^d))]) x) - for { - v_0 := v.Args[0] - if v_0.Op != OpXor8 { - break - } - x := v_0.Args[0] - v_0_1 := v_0.Args[1] - if v_0_1.Op != OpConst8 { - break - } - t := v_0_1.Type - d := v_0_1.AuxInt - v_1 := v.Args[1] - if v_1.Op != OpConst8 { - break - } - if v_1.Type != t { - break - } - c := v_1.AuxInt - v.reset(OpXor8) - v0 := b.NewValue0(v.Pos, OpConst8, t) - v0.AuxInt = int64(int8(c ^ d)) - v.AddArg(v0) - v.AddArg(x) - return true - } return false } func rewriteValuegeneric_OpZero(v *Value) bool { @@ -23512,7 +17700,7 @@ func rewriteValuegeneric_OpZeroExt32to64(v *Value) bool { return false } func rewriteValuegeneric_OpZeroExt8to16(v *Value) bool { - // match: (ZeroExt8to16 (Const8 [c])) + // match: (ZeroExt8to16 (Const8 [c])) // cond: // result: (Const16 [int64( uint8(c))]) for { @@ -23525,7 +17713,7 @@ func rewriteValuegeneric_OpZeroExt8to16(v *Value) bool { v.AuxInt = int64(uint8(c)) return true } - // match: (ZeroExt8to16 (Trunc16to8 x:(Rsh16Ux64 _ (Const64 [s])))) + // match: (ZeroExt8to16 (Trunc16to8 x:(Rsh16Ux64 _ (Const64 [s])))) // cond: s >= 8 // result: x for { @@ -23553,7 +17741,7 @@ func rewriteValuegeneric_OpZeroExt8to16(v *Value) bool { return false } func rewriteValuegeneric_OpZeroExt8to32(v *Value) bool { - // match: (ZeroExt8to32 (Const8 [c])) + // match: (ZeroExt8to32 (Const8 [c])) // cond: // result: (Const32 [int64( uint8(c))]) for { @@ -23566,7 +17754,7 @@ func rewriteValuegeneric_OpZeroExt8to32(v *Value) bool { v.AuxInt = int64(uint8(c)) return true } - // match: (ZeroExt8to32 (Trunc32to8 x:(Rsh32Ux64 _ (Const64 [s])))) + // match: (ZeroExt8to32 (Trunc32to8 x:(Rsh32Ux64 _ (Const64 [s])))) // cond: s >= 24 // result: x for { @@ -23594,7 +17782,7 @@ func rewriteValuegeneric_OpZeroExt8to32(v *Value) bool { return false } func rewriteValuegeneric_OpZeroExt8to64(v *Value) bool { - // match: (ZeroExt8to64 (Const8 [c])) + // match: (ZeroExt8to64 (Const8 [c])) // cond: // result: (Const64 [int64( uint8(c))]) for { @@ -23607,7 +17795,7 @@ func rewriteValuegeneric_OpZeroExt8to64(v *Value) bool { v.AuxInt = int64(uint8(c)) return true } - // match: (ZeroExt8to64 (Trunc64to8 x:(Rsh64Ux64 _ (Const64 [s])))) + // match: (ZeroExt8to64 (Trunc64to8 x:(Rsh64Ux64 _ (Const64 [s])))) // cond: s >= 56 // result: x for { -- 2.48.1
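
Illustrative note (not part of the patch): the hunks above restore the hand-written commuted variants of the generic rules, including the constant-canonicalization matches such as (Or64 x (Const64 [c])) && x.Op != OpConst64 -> (Or64 (Const64 [c]) x). The sketch below is a minimal toy version of that "move the constant to the first operand" rewrite, using hypothetical stand-in types rather than the compiler's internal ssa.Value/Op types, purely to show the shape of the restored matchers.

// Toy sketch of the constant-canonicalization rewrite shown in the patch.
// The Op/Value types here are hypothetical stand-ins, not the ssa package's.
package main

import "fmt"

type Op int

const (
	OpConst Op = iota // a constant operand
	OpOr              // a commutative op, e.g. Or64
)

type Value struct {
	Op     Op
	AuxInt int64
	Args   []*Value
}

// canonicalizeOr rewrites (Or x (Const c)) to (Or (Const c) x) when x is not
// itself a constant, mirroring rules of the form
//   (Or64 x (Const64 [c])) && x.Op != OpConst64 -> (Or64 (Const64 [c]) x)
// that the patch adds back to the generated rewrite functions.
func canonicalizeOr(v *Value) bool {
	if v.Op != OpOr || len(v.Args) != 2 {
		return false
	}
	x, c := v.Args[0], v.Args[1]
	if c.Op != OpConst || x.Op == OpConst {
		// Second operand is not a constant, or the value is already canonical.
		return false
	}
	v.Args[0], v.Args[1] = c, x
	return true
}

func main() {
	v := &Value{
		Op: OpOr,
		Args: []*Value{
			{Op: OpOr},                  // non-constant operand
			{Op: OpConst, AuxInt: 0xff}, // constant operand, should move first
		},
	}
	changed := canonicalizeOr(v)
	fmt.Println("rewrote:", changed, "first arg is const:", v.Args[0].Op == OpConst)
}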